BaseTools: use stdint.h for GCC ProcessorBind.h typedefs

The AArch64 definitions of UINT64/INT64 differ from the X64 ones.
Since this is on the tool side, doing as X64 does and picking the
definitions up from stdint.h is a better idea than hardcoding
them. So copy the pattern from X64/ProcessorBind.h.

Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Bob Feng <bob.c.feng@intel.com>
Cc: Liming Gao <liming.gao@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Signed-off-by: Leif Lindholm <leif.lindholm@linaro.org>
Reviewed-by: Liming Gao <liming.gao@intel.com>
Reviewed-by: Bob Feng <bob.c.feng@intel.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
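
For illustration of the problem the message describes (an editor's sketch, not part of the commit): on an LP64 Linux host, glibc's stdint.h makes uint64_t an alias of unsigned long, so the old hardcoded "unsigned long long" UINT64 had the right width but was a distinct type from the uint64_t-based one the X64 tool-side header used, which surfaces as printf-format warnings and similar type-identity mismatches.

/*
 * Editor's sketch, not part of the commit: shows how a hardcoded
 * "unsigned long long" UINT64 differs in type, though not in width,
 * from stdint.h's uint64_t. Assumes an LP64 host (e.g. x86_64 or
 * AArch64 Linux), where glibc defines uint64_t as unsigned long.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

typedef unsigned long long HARDCODED_UINT64; /* old AArch64 tool-side style */
typedef uint64_t           STDINT_UINT64;    /* new style, shared with X64 */

int main(void)
{
  HARDCODED_UINT64 a = 42;
  STDINT_UINT64    b = 42;

  /* Same width on this host... */
  printf("widths: %zu and %zu bytes\n", sizeof a, sizeof b);

  /* ...but PRIu64 expands to uint64_t's conversion ("lu" on LP64
   * glibc), so passing the hardcoded type through it would draw a
   * -Wformat warning; the explicit cast sidesteps the mismatch. */
  printf("a = %llu, b = %" PRIu64 "\n", (unsigned long long)a, b);
  return 0;
}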
Leif Lindholm 2019-09-26 20:28:18 +01:00
parent ed9db1b91c
commit 5be5439a5a
1 changed file with 13 additions and 13 deletions

@@ -41,21 +41,21 @@
 typedef signed char INT8;
 #else
 //
-// Assume standard AARCH64 alignment.
+// Use ANSI C 2000 stdint.h integer width declarations
 //
-typedef unsigned long long UINT64;
-typedef long long INT64;
-typedef unsigned int UINT32;
-typedef int INT32;
-typedef unsigned short UINT16;
-typedef unsigned short CHAR16;
-typedef short INT16;
-typedef unsigned char BOOLEAN;
-typedef unsigned char UINT8;
-typedef char CHAR8;
-typedef signed char INT8;
+#include <stdint.h>
+typedef uint8_t BOOLEAN;
+typedef int8_t INT8;
+typedef uint8_t UINT8;
+typedef int16_t INT16;
+typedef uint16_t UINT16;
+typedef int32_t INT32;
+typedef uint32_t UINT32;
+typedef int64_t INT64;
+typedef uint64_t UINT64;
+typedef char CHAR8;
+typedef uint16_t CHAR16;
+#define UINT8_MAX 0xff
 #endif
 ///
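
As a quick sanity check of the new pattern, the expected widths can be asserted at compile time. The following is an editor's sketch, not part of the tree: the typedef names mirror the header above, and _Static_assert requires C11.

/*
 * Editor's sketch, not in the edk2 tree: compile-time check that
 * the stdint.h-based typedefs carry the widths UEFI requires,
 * guaranteed by the standard rather than by host-ABI assumptions.
 */
#include <stdint.h>

typedef uint8_t  BOOLEAN;
typedef int8_t   INT8;
typedef uint8_t  UINT8;
typedef int16_t  INT16;
typedef uint16_t UINT16;
typedef int32_t  INT32;
typedef uint32_t UINT32;
typedef int64_t  INT64;
typedef uint64_t UINT64;
typedef char     CHAR8;
typedef uint16_t CHAR16;

_Static_assert(sizeof(BOOLEAN) == 1, "BOOLEAN must be 1 byte");
_Static_assert(sizeof(UINT16)  == 2, "UINT16 must be 2 bytes");
_Static_assert(sizeof(CHAR16)  == 2, "CHAR16 must be 2 bytes");
_Static_assert(sizeof(UINT32)  == 4, "UINT32 must be 4 bytes");
_Static_assert(sizeof(INT64)   == 8, "INT64 must be 8 bytes");
_Static_assert(sizeof(UINT64)  == 8, "UINT64 must be 8 bytes");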