@@ -849,30 +849,6 @@ static int running_on_valgrind = -1;
 
 /*==========================================================================*/
 
-/*
- * Locking
- *
- * To reduce lock contention, it would probably be better to refine the
- * crude function locking with per size class locking.  I'm not positive
- * however, whether it's worth switching to such locking policy because
- * of the performance penalty it might introduce.
- *
- * The following macros describe the simplest (should also be the fastest)
- * lock object on a particular platform and the init/fini/lock/unlock
- * operations on it.  The locks defined here are not expected to be recursive
- * because it is assumed that they will always be called in the order:
- * INIT, [LOCK, UNLOCK]*, FINI.
- */
-
-/*
- * Python's threads are serialized, so object malloc locking is disabled.
- */
-#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
-#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
-#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
-#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
-#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */
-
 /* When you say memory, my mind reasons in terms of (pointers to) blocks */
 typedef uint8_t block;
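The SIMPLELOCK_* macros deleted above were deliberately empty: the GIL already serializes access to the allocator, so object malloc locking was disabled. Had real locking ever been needed, the macros could have been filled in following the INIT, [LOCK, UNLOCK]*, FINI protocol the removed comment describes. A minimal sketch, assuming POSIX threads (hypothetical, not part of this patch):

#include <pthread.h>

/* One mutex per named lock; DECL defines it, the other macros wrap
 * the pthread calls so call sites keep the documented order:
 * INIT, [LOCK, UNLOCK]*, FINI.  None of these locks are recursive. */
#define SIMPLELOCK_DECL(lock)   static pthread_mutex_t lock;
#define SIMPLELOCK_INIT(lock)   pthread_mutex_init(&(lock), NULL)
#define SIMPLELOCK_FINI(lock)   pthread_mutex_destroy(&(lock))
#define SIMPLELOCK_LOCK(lock)   pthread_mutex_lock(&(lock))
#define SIMPLELOCK_UNLOCK(lock) pthread_mutex_unlock(&(lock))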
@@ -944,15 +920,6 @@ struct arena_object {
 
 /*==========================================================================*/
 
-/*
- * This malloc lock
- */
-SIMPLELOCK_DECL(_malloc_lock)
-#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
-#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
-#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
-#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)
-
 /*
  * Pool table -- headed, circular, doubly-linked lists of partially used pools.
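The comment removed in the first hunk wondered whether this single _malloc_lock should be refined into per size class locking to reduce contention. That refinement was never implemented; purely as an illustration of the idea, with hypothetical names and the pthread-based macros sketched above:

#include <pthread.h>

/* obmalloc's class count: SMALL_REQUEST_THRESHOLD (512) / ALIGNMENT (8). */
#define NB_SMALL_SIZE_CLASSES 64

/* One lock per size class: threads allocating from different size
 * classes would no longer contend on a single global lock. */
static pthread_mutex_t class_locks[NB_SMALL_SIZE_CLASSES];

static void
class_locks_init(void)
{
    for (int i = 0; i < NB_SMALL_SIZE_CLASSES; i++)
        pthread_mutex_init(&class_locks[i], NULL);
}

#define LOCK_CLASS(size)    pthread_mutex_lock(&class_locks[size])
#define UNLOCK_CLASS(size)  pthread_mutex_unlock(&class_locks[size])

Whether the finer granularity would pay for the extra lock traffic is exactly the performance question the original comment raised; the patch sidesteps it entirely by deleting the locking layer.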
@@ -1381,7 +1348,6 @@ pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
         return 0;
     }
 
-    LOCK();
     /*
      * Most frequent paths first
      */
@@ -1537,13 +1503,11 @@ pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
     goto init_pool;
 
 success:
-    UNLOCK();
     assert(bp != NULL);
     *ptr_p = (void *)bp;
     return 1;
 
 failed:
-    UNLOCK();
     return 0;
 }
@@ -1612,8 +1576,6 @@ pymalloc_free(void *ctx, void *p)
     }
     /* We allocated this address. */
 
-    LOCK();
-
     /* Link p to the start of the pool's freeblock list.  Since
      * the pool had at least the p block outstanding, the pool
      * wasn't empty (so it's already in a usedpools[] list, or
@@ -1798,7 +1760,6 @@ pymalloc_free(void *ctx, void *p)
     goto success;
 
 success:
-    UNLOCK();
    return 1;
 }