I'd like to know if there is an easy way of determining the maximum number of characters needed to print a decimal int.
I know <limits.h> contains limits such as INT_MAX.
The maximum number of decimal digits d of a signed or unsigned integer x of b bits is bounded by the number of decimal digits of the number 2^b: the unsigned maximum, 2^b - 1, has exactly that many digits, because 2^b is never a power of ten. In the case of signed numbers, an extra character must be added for the sign.
The number of decimal digits of x is floor(log_10(x)) + 1, which equals log_10(x) rounded up whenever x is not a power of ten.
Therefore, the maximum number of decimal digits of x will be log_10(2^b) = b * log_10(2) = b * 0.301029995663981, rounded up.
If s is the size in bytes (given by the sizeof operator) of a certain type of integer used to store x, its size b in bits will be b = s * 8. So, the maximum number of decimal digits d will be (s * 8) * 0.301029995663981, rounded up. Since b * log_10(2) is never an integer, rounding up amounts to truncating (converting to an integer) and adding 1.
Of course, 1 more must still be added on top of all these constants to make room for the terminating null byte (see the declaration of IntegerString in the following example).
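For example, for a 4-byte int: 4 * 8 = 32 bits, and 32 * 0.301029995663981 ≈ 9.63, which truncates to 9; adding 1 gives 10 digits, matching INT_MAX = 2147483647. With 1 more character for the sign and 1 for the null byte, a 12-byte buffer holds any int, e.g. INT_MIN = -2147483648 (11 characters plus the terminator).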
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#define COMMON_LOG_OF_2 0.301029995663981
#define MAX_DECIMAL_DIGITS_UCHAR ((unsigned) (sizeof (unsigned char ) * 8 * COMMON_LOG_OF_2) + 1)
#define MAX_DECIMAL_DIGITS_USHORT ((unsigned) (sizeof (unsigned short ) * 8 * COMMON_LOG_OF_2) + 1)
#define MAX_DECIMAL_DIGITS_UINT ((unsigned) (sizeof (unsigned int ) * 8 * COMMON_LOG_OF_2) + 1)
#define MAX_DECIMAL_DIGITS_ULONG ((unsigned) (sizeof (unsigned long ) * 8 * COMMON_LOG_OF_2) + 1)
#define MAX_DECIMAL_DIGITS_ULONGLONG ((unsigned) (sizeof (unsigned long long) * 8 * COMMON_LOG_OF_2) + 1)
#define MAX_DECIMAL_DIGITS_UINT128 ((unsigned) (sizeof (unsigned __int128 ) * 8 * COMMON_LOG_OF_2) + 1) // __int128 is a GCC/Clang extension
#define MAX_DECIMAL_DIGITS_CHAR (1 + MAX_DECIMAL_DIGITS_UCHAR )
#define MAX_DECIMAL_DIGITS_SHORT (1 + MAX_DECIMAL_DIGITS_USHORT )
#define MAX_DECIMAL_DIGITS_INT (1 + MAX_DECIMAL_DIGITS_UINT )
#define MAX_DECIMAL_DIGITS_LONG (1 + MAX_DECIMAL_DIGITS_ULONG )
#define MAX_DECIMAL_DIGITS_LONGLONG (1 + MAX_DECIMAL_DIGITS_ULONGLONG)
#define MAX_DECIMAL_DIGITS_INT128 (1 + MAX_DECIMAL_DIGITS_UINT128 )
int main (void)
{
  char IntegerString[MAX_DECIMAL_DIGITS_INT + 1];
  printf ("MAX_DECIMAL_DIGITS_UCHAR = %2u\n",MAX_DECIMAL_DIGITS_UCHAR );
  printf ("MAX_DECIMAL_DIGITS_USHORT = %2u\n",MAX_DECIMAL_DIGITS_USHORT );
  printf ("MAX_DECIMAL_DIGITS_UINT = %2u\n",MAX_DECIMAL_DIGITS_UINT );
  printf ("MAX_DECIMAL_DIGITS_ULONG = %2u\n",MAX_DECIMAL_DIGITS_ULONG );
  printf ("MAX_DECIMAL_DIGITS_ULONGLONG = %2u\n",MAX_DECIMAL_DIGITS_ULONGLONG);
  printf ("MAX_DECIMAL_DIGITS_UINT128 = %2u\n",MAX_DECIMAL_DIGITS_UINT128 );
  printf ("MAX_DECIMAL_DIGITS_CHAR = %2u\n",MAX_DECIMAL_DIGITS_CHAR );
  printf ("MAX_DECIMAL_DIGITS_SHORT = %2u\n",MAX_DECIMAL_DIGITS_SHORT );
  printf ("MAX_DECIMAL_DIGITS_INT = %2u\n",MAX_DECIMAL_DIGITS_INT );
  printf ("MAX_DECIMAL_DIGITS_LONG = %2u\n",MAX_DECIMAL_DIGITS_LONG );
  printf ("MAX_DECIMAL_DIGITS_LONGLONG = %2u\n",MAX_DECIMAL_DIGITS_LONGLONG );
  printf ("MAX_DECIMAL_DIGITS_INT128 = %2u\n",MAX_DECIMAL_DIGITS_INT128 );
  sprintf (IntegerString,"%d",INT_MAX);
  printf ("INT_MAX = %d\n",INT_MAX);
  printf ("IntegerString = %s\n",IntegerString);
  sprintf (IntegerString,"%d",INT_MIN);
  printf ("INT_MIN = %d\n",INT_MIN);
  printf ("IntegerString = %s\n",IntegerString);
  return EXIT_SUCCESS;
}
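Not in the original program: as a quick cross-check, C99's snprintf can compute the exact worst case at run time. Passing a null buffer and size 0 makes snprintf return the number of characters the conversion would produce, excluding the terminating null byte. A minimal sketch, where EXPECTED_BOUND is a hypothetical name and its value of 11 assumes a 4-byte int:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define EXPECTED_BOUND 11 /* value of MAX_DECIMAL_DIGITS_INT above when int is 4 bytes */

int main (void)
{
  /* snprintf (NULL, 0, ...) is C99: it returns the length the output would
     have, excluding the terminating null byte. INT_MIN is the longest int
     value because of its leading '-'. */
  int worst = snprintf (NULL, 0, "%d", INT_MIN);
  printf ("worst case = %d, bound = %d\n", worst, EXPECTED_BOUND);
  return worst <= EXPECTED_BOUND ? EXIT_SUCCESS : EXIT_FAILURE;
}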
EDIT:
Unfortunately, the use of floating point means these expressions are not strictly integer constant expressions, which may cause problems in contexts where the language requires one. I have modified them to multiply by log_10(2) * 2^11 = 616 and to divide by 2^8 (the remaining factor of 2^3 is the 8 bits per byte), so that the compiler evaluates everything with integer arithmetic only.
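For example, for a 4-byte unsigned int: 4 * 616 = 2464, and 2464 >> 8 = 9 (truncated); adding 1 gives the same 10 digits as before. Here is the revised program: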
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#define LOG2_x_2_11 616 // log_10(2) * 2^11, truncated
#define MAX_DECIMAL_DIGITS_UCHAR (((sizeof (unsigned char ) * LOG2_x_2_11) >> 8) + 1)
#define MAX_DECIMAL_DIGITS_USHORT (((sizeof (unsigned short ) * LOG2_x_2_11) >> 8) + 1)
#define MAX_DECIMAL_DIGITS_UINT (((sizeof (unsigned int ) * LOG2_x_2_11) >> 8) + 1)
#define MAX_DECIMAL_DIGITS_ULONG (((sizeof (unsigned long ) * LOG2_x_2_11) >> 8) + 1)
#define MAX_DECIMAL_DIGITS_ULONGLONG (((sizeof (unsigned long long) * LOG2_x_2_11) >> 8) + 1)
#define MAX_DECIMAL_DIGITS_UINT128 (((sizeof (unsigned __int128 ) * LOG2_x_2_11) >> 8) + 1) // __int128 is a GCC/Clang extension
#define MAX_DECIMAL_DIGITS_CHAR (1 + MAX_DECIMAL_DIGITS_UCHAR )
#define MAX_DECIMAL_DIGITS_SHORT (1 + MAX_DECIMAL_DIGITS_USHORT )
#define MAX_DECIMAL_DIGITS_INT (1 + MAX_DECIMAL_DIGITS_UINT )
#define MAX_DECIMAL_DIGITS_LONG (1 + MAX_DECIMAL_DIGITS_ULONG )
#define MAX_DECIMAL_DIGITS_LONGLONG (1 + MAX_DECIMAL_DIGITS_ULONGLONG)
#define MAX_DECIMAL_DIGITS_INT128 (1 + MAX_DECIMAL_DIGITS_UINT128 )
int main (void)
{
  char IntegerString[MAX_DECIMAL_DIGITS_INT + 1];
  printf ("MAX_DECIMAL_DIGITS_UCHAR = %2zu\n",MAX_DECIMAL_DIGITS_UCHAR );
  printf ("MAX_DECIMAL_DIGITS_USHORT = %2zu\n",MAX_DECIMAL_DIGITS_USHORT );
  printf ("MAX_DECIMAL_DIGITS_UINT = %2zu\n",MAX_DECIMAL_DIGITS_UINT );
  printf ("MAX_DECIMAL_DIGITS_ULONG = %2zu\n",MAX_DECIMAL_DIGITS_ULONG );
  printf ("MAX_DECIMAL_DIGITS_ULONGLONG = %2zu\n",MAX_DECIMAL_DIGITS_ULONGLONG);
  printf ("MAX_DECIMAL_DIGITS_UINT128 = %2zu\n",MAX_DECIMAL_DIGITS_UINT128 );
  printf ("MAX_DECIMAL_DIGITS_CHAR = %2zu\n",MAX_DECIMAL_DIGITS_CHAR );
  printf ("MAX_DECIMAL_DIGITS_SHORT = %2zu\n",MAX_DECIMAL_DIGITS_SHORT );
  printf ("MAX_DECIMAL_DIGITS_INT = %2zu\n",MAX_DECIMAL_DIGITS_INT );
  printf ("MAX_DECIMAL_DIGITS_LONG = %2zu\n",MAX_DECIMAL_DIGITS_LONG );
  printf ("MAX_DECIMAL_DIGITS_LONGLONG = %2zu\n",MAX_DECIMAL_DIGITS_LONGLONG );
  printf ("MAX_DECIMAL_DIGITS_INT128 = %2zu\n",MAX_DECIMAL_DIGITS_INT128 );
  sprintf (IntegerString,"%d",INT_MAX);
  printf ("INT_MAX = %d\n",INT_MAX);
  printf ("IntegerString = %s\n",IntegerString);
  sprintf (IntegerString,"%d",INT_MIN);
  printf ("INT_MIN = %d\n",INT_MIN);
  printf ("IntegerString = %s\n",IntegerString);
  return EXIT_SUCCESS;
}
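Because the revised macros contain no floating point, they are integer constant expressions, so they can now be used where the language requires one. A minimal sketch of such a use, assuming C11 (for _Static_assert) and a 4-byte int (for the asserted value of 11); the relevant macros are repeated only to keep the snippet self-contained:

#define LOG2_x_2_11 616 // log_10(2) * 2^11, truncated
#define MAX_DECIMAL_DIGITS_UINT (((sizeof (unsigned int) * LOG2_x_2_11) >> 8) + 1)
#define MAX_DECIMAL_DIGITS_INT (1 + MAX_DECIMAL_DIGITS_UINT)

// Evaluated entirely at compile time; compilation fails if the bound changes.
// 11 = 10 digits + 1 sign character, which holds when int is 4 bytes.
_Static_assert (MAX_DECIMAL_DIGITS_INT == 11, "unexpected decimal digit bound for int");

int main (void)
{
  return 0;
}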