@@ -1272,7 +1272,7 @@ _Py_GetAllocatedBlocks(void)
    bit allocation for keys
 
    64-bit pointers and 2^20 arena size:
-     16 -> ignored (BITS - PHYSICAL_BITS)
+     16 -> ignored (POINTER_BITS - ADDRESS_BITS)
     10 -> MAP_TOP
     10 -> MAP_MID
      8 -> MAP_BOT
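For orientation, the comment above describes how a 64-bit pointer is carved into radix-tree keys. The following standalone sketch is not part of the commit; it assumes ARENA_BITS = 20 (the 2^20 arena size above) and reproduces the shift arithmetic that the MAP_*_INDEX macros later in the diff encode.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define POINTER_BITS 64
    #define ADDRESS_BITS 48
    #define ARENA_BITS   20   /* 2^20-byte ideal aligned arena */
    #define MAP_TOP_BITS 10
    #define MAP_MID_BITS 10
    #define MAP_BOT_BITS  8

    int main(void)
    {
        /* the five fields account for every one of the 64 pointer bits */
        assert((POINTER_BITS - ADDRESS_BITS) + MAP_TOP_BITS + MAP_MID_BITS
               + MAP_BOT_BITS + ARENA_BITS == POINTER_BITS);

        uintptr_t p = (uintptr_t)0x00007f1234567890ULL;   /* arbitrary address */
        unsigned top = (p >> (ARENA_BITS + MAP_BOT_BITS + MAP_MID_BITS))
                       & ((1u << MAP_TOP_BITS) - 1);
        unsigned mid = (p >> (ARENA_BITS + MAP_BOT_BITS))
                       & ((1u << MAP_MID_BITS) - 1);
        unsigned bot = (p >> ARENA_BITS) & ((1u << MAP_BOT_BITS) - 1);
        printf("top=%u mid=%u bot=%u\n", top, mid, bot);
        return 0;
    }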
@@ -1291,21 +1291,21 @@ _Py_GetAllocatedBlocks(void)
 #if SIZEOF_VOID_P == 8
 
 /* number of bits in a pointer */
-#define BITS 64
+#define POINTER_BITS 64
 
 /* Current 64-bit processors are limited to 48-bit physical addresses.  For
  * now, the top 17 bits of addresses will all be equal to bit 2**47.  If that
  * changes in the future, this must be adjusted upwards.
  */
-#define PHYSICAL_BITS 48
+#define ADDRESS_BITS 48
 
 /* use the top and mid layers of the radix tree */
 #define USE_INTERIOR_NODES
 
 #elif SIZEOF_VOID_P == 4
 
-#define BITS 32
-#define PHYSICAL_BITS 32
+#define POINTER_BITS 32
+#define ADDRESS_BITS 32
 
 #else
 
@@ -1321,7 +1321,7 @@ _Py_GetAllocatedBlocks(void)
 
 #ifdef USE_INTERIOR_NODES
 /* number of bits used for MAP_TOP and MAP_MID nodes */
-#define INTERIOR_BITS ((PHYSICAL_BITS - ARENA_BITS + 2) / 3)
+#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)
 #else
 #define INTERIOR_BITS 0
 #endif
@@ -1334,7 +1334,7 @@ _Py_GetAllocatedBlocks(void)
 #define MAP_MID_LENGTH (1 << MAP_MID_BITS)
 #define MAP_MID_MASK (MAP_MID_LENGTH - 1)
 
-#define MAP_BOT_BITS (PHYSICAL_BITS - ARENA_BITS - 2*INTERIOR_BITS)
+#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)
 #define MAP_BOT_LENGTH (1 << MAP_BOT_BITS)
 #define MAP_BOT_MASK (MAP_BOT_LENGTH - 1)
 
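Plugging in the 64-bit values makes the arithmetic concrete: with ADDRESS_BITS = 48 and ARENA_BITS = 20, INTERIOR_BITS = (48 - 20 + 2) / 3 = 10, so the top and mid nodes index 10 bits each, and MAP_BOT_BITS = 48 - 20 - 2*10 = 8, matching the 16/10/10/8/20 split in the header comment. A compile-time check of that derivation (a sketch, not code from the patch):

    /* sketch: verify the derived widths for the 64-bit configuration */
    #define ADDRESS_BITS  48
    #define ARENA_BITS    20
    #define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)           /* 10 */
    #define MAP_BOT_BITS  (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)   /*  8 */

    _Static_assert(INTERIOR_BITS == 10, "MAP_TOP and MAP_MID index 10 bits each");
    _Static_assert(MAP_BOT_BITS == 8, "MAP_BOT indexes the remaining 8 bits");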
@@ -1347,10 +1347,13 @@ _Py_GetAllocatedBlocks(void)
 #define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK)
 #define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK)
 
-#if BITS > PHYSICAL_BITS
-/* Return non-physical bits of pointer.  Should be same for all valid
- * pointers if PHYSICAL_BITS set correctly. */
-#define HIGH_BITS(p) (AS_UINT(p) >> PHYSICAL_BITS)
+#if POINTER_BITS > ADDRESS_BITS
+/* Return non-physical address bits of a pointer.  Those bits should be same
+ * for all valid pointers if ADDRESS_BITS set correctly.  Linux has support for
+ * 57-bit address space (Intel 5-level paging) but will not currently give
+ * those addresses to user space.
+ */
+#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS)
 #else
 #define HIGH_BITS(p) 0
 #endif
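The expanded comment is the substance of the rename: 48 bits limit the address space handed to user code, not what the pointer type can hold, so ADDRESS_BITS is the more accurate name. Because x86-64 user-space pointers are canonical (bits 48-63 replicate bit 47), AS_UINT(p) >> ADDRESS_BITS is identical for every valid pointer, which is what the asserts in the next two hunks exploit. A minimal demonstration of the idea, with names assumed rather than taken from the patch:

    #include <assert.h>
    #include <stdint.h>

    #define ADDRESS_BITS 48
    #define HIGH_BITS(p) ((uintptr_t)(p) >> ADDRESS_BITS)

    static int anchor;   /* stand-in for arena_map_root: a known-good address */

    /* every valid user-space pointer should agree with the anchor; a failure
       here would mean ADDRESS_BITS is set too low for this platform */
    static void check_address_bits(const void *p)
    {
        assert(HIGH_BITS(p) == HIGH_BITS(&anchor));
    }

    int main(void)
    {
        int local;
        check_address_bits(&local);
        check_address_bits(&anchor);
        return 0;
    }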
@@ -1400,7 +1403,7 @@ static arena_map_bot_t *
 arena_map_get(block *p, int create)
 {
 #ifdef USE_INTERIOR_NODES
-    /* sanity check that PHYSICAL_BITS is correct */
+    /* sanity check that ADDRESS_BITS is correct */
     assert(HIGH_BITS(p) == HIGH_BITS(&arena_map_root));
     int i1 = MAP_TOP_INDEX(p);
     if (arena_map_root.ptrs[i1] == NULL) {
@@ -1460,7 +1463,7 @@ arena_map_get(block *p, int create)
 static int
 arena_map_mark_used(uintptr_t arena_base, int is_used)
 {
-    /* sanity check that PHYSICAL_BITS is correct */
+    /* sanity check that ADDRESS_BITS is correct */
     assert(HIGH_BITS(arena_base) == HIGH_BITS(&arena_map_root));
     arena_map_bot_t *n_hi = arena_map_get((block *)arena_base, is_used);
     if (n_hi == NULL) {
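Both of these hunks only retouch comments; the surrounding code shows where HIGH_BITS() earns its keep, guarding every tree walk. For context, a simplified sketch of the lookup pattern that arena_map_get() implements; the struct layout and field names here are assumptions modeled on the macros above, not the exact CPython definitions:

    #include <stddef.h>
    #include <stdint.h>

    #define MAP_TOP_SHIFT 38    /* ARENA_BITS + MAP_BOT_BITS + MAP_MID_BITS */
    #define MAP_MID_SHIFT 28    /* ARENA_BITS + MAP_BOT_BITS */
    #define MAP_TOP_MASK  0x3ff /* 10 bits */
    #define MAP_MID_MASK  0x3ff /* 10 bits */

    typedef struct arena_map_bot { int32_t arenas[1 << 8]; } arena_map_bot_t;
    typedef struct arena_map_mid { arena_map_bot_t *ptrs[1 << 10]; } arena_map_mid_t;
    typedef struct arena_map_top { arena_map_mid_t *ptrs[1 << 10]; } arena_map_top_t;

    static arena_map_top_t arena_map_root;

    /* walk top -> mid; a NULL at either level means no arena has ever
       covered this address, so the caller can answer "not in range" */
    static arena_map_bot_t *
    arena_map_lookup(uintptr_t p)
    {
        arena_map_mid_t *mid =
            arena_map_root.ptrs[(p >> MAP_TOP_SHIFT) & MAP_TOP_MASK];
        if (mid == NULL) {
            return NULL;
        }
        return mid->ptrs[(p >> MAP_MID_SHIFT) & MAP_MID_MASK];
    }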