#ifndef UMF_POOL_JEMALLOC_ENABLED

umf_memory_pool_ops_t *umfJemallocPoolOps(void) { return NULL; }

+umf_result_t
+umfJemallocPoolParamsCreate(umf_jemalloc_pool_params_handle_t *hParams) {
+    (void)hParams; // unused
+    return UMF_RESULT_ERROR_NOT_SUPPORTED;
+}
+
+umf_result_t
+umfJemallocPoolParamsDestroy(umf_jemalloc_pool_params_handle_t hParams) {
+    (void)hParams; // unused
+    return UMF_RESULT_ERROR_NOT_SUPPORTED;
+}
+
+umf_result_t
+umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams,
+                                  size_t numArenas) {
+    (void)hParams;   // unused
+    (void)numArenas; // unused
+    return UMF_RESULT_ERROR_NOT_SUPPORTED;
+}

#else

#include <jemalloc/jemalloc.h>

#define MALLOCX_ARENA_MAX (MALLCTL_ARENAS_ALL - 1)

+typedef struct umf_jemalloc_pool_params_t {
+    size_t n_arenas;
+} umf_jemalloc_pool_params_t;
+
typedef struct jemalloc_memory_pool_t {
    umf_memory_provider_handle_t provider;
-    unsigned int arena_index; // index of jemalloc arena
+    size_t n_arenas;
+    unsigned int arena_index[]; // indices of the jemalloc arenas backing this pool
} jemalloc_memory_pool_t;

static __TLS umf_result_t TLS_last_allocation_error;
@@ -47,6 +71,14 @@ static jemalloc_memory_pool_t *get_pool_by_arena_index(unsigned arena_ind) {
    return pool_by_arena_index[arena_ind];
}

+// SplitMix64 hash
+static uint64_t hash64(uint64_t x) {
+    x += 0x9e3779b97f4a7c15;
+    x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9;
+    x = (x ^ (x >> 27)) * 0x94d049bb133111eb;
+    return x ^ (x >> 31);
+}
+
// arena_extent_alloc - an extent allocation function conforms to the extent_alloc_t type and upon
// success returns a pointer to size bytes of mapped memory on behalf of arena arena_ind such that
// the extent's base address is a multiple of alignment, as well as setting *zero to indicate
@@ -285,12 +317,22 @@ static extent_hooks_t arena_extent_hooks = {
    .merge = arena_extent_merge,
};

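+// Pick the arena used by the calling thread: the cached thread id is hashed
+// with SplitMix64 and mapped onto one of the pool's arenas, spreading
+// concurrent threads across arenas rather than funneling them into one.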
+static unsigned get_arena_index(jemalloc_memory_pool_t *pool) {
+    static __TLS unsigned tid = 0;
+
+    if (tid == 0) {
+        tid = utils_gettid();
+    }
+
+    return pool->arena_index[hash64(tid) % pool->n_arenas];
+}
+
static void *op_malloc(void *pool, size_t size) {
    assert(pool);
    jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
    // MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
    // the tcache, so we wouldn't be able to guarantee isolation of different providers.
-    int flags = MALLOCX_ARENA(je_pool->arena_index) | MALLOCX_TCACHE_NONE;
+    int flags = MALLOCX_ARENA(get_arena_index(je_pool)) | MALLOCX_TCACHE_NONE;
    void *ptr = je_mallocx(size, flags);
    if (ptr == NULL) {
        TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -343,7 +385,7 @@ static void *op_realloc(void *pool, void *ptr, size_t size) {
    jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
    // MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
    // the tcache, so we wouldn't be able to guarantee isolation of different providers.
-    int flags = MALLOCX_ARENA(je_pool->arena_index) | MALLOCX_TCACHE_NONE;
+    int flags = MALLOCX_ARENA(get_arena_index(je_pool)) | MALLOCX_TCACHE_NONE;
    void *new_ptr = je_rallocx(ptr, size, flags);
    if (new_ptr == NULL) {
        TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -364,7 +406,8 @@ static void *op_realloc(void *pool, void *ptr, size_t size) {
static void *op_aligned_alloc(void *pool, size_t size, size_t alignment) {
    assert(pool);
    jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
-    unsigned arena = je_pool->arena_index;
+
+    unsigned arena = get_arena_index(je_pool);
    // MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
    // the tcache, so we wouldn't be able to guarantee isolation of different providers.
    int flags =
@@ -382,62 +425,91 @@ static void *op_aligned_alloc(void *pool, size_t size, size_t alignment) {

static umf_result_t op_initialize(umf_memory_provider_handle_t provider,
                                  void *params, void **out_pool) {
-    (void)params; // unused
    assert(provider);
    assert(out_pool);

    extent_hooks_t *pHooks = &arena_extent_hooks;
    size_t unsigned_size = sizeof(unsigned);
    int err;
+    umf_jemalloc_pool_params_t *jemalloc_params =
+        (umf_jemalloc_pool_params_t *)params;

-    jemalloc_memory_pool_t *pool =
-        umf_ba_global_alloc(sizeof(jemalloc_memory_pool_t));
-    if (!pool) {
-        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
+    size_t n_arenas = 0;
+    if (jemalloc_params) {
+        n_arenas = jemalloc_params->n_arenas;
    }

-    pool->provider = provider;
-
-    unsigned arena_index;
-    err = je_mallctl("arenas.create", (void *)&arena_index, &unsigned_size,
-                     NULL, 0);
-    if (err) {
-        LOG_ERR("Could not create arena.");
-        goto err_free_pool;
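+    // No arena count was requested by the caller: default to four arenas per
+    // CPU core.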
+    if (n_arenas == 0) {
+        n_arenas = utils_get_num_cores() * 4;
    }
-
-    // setup extent_hooks for newly created arena
-    char cmd[64];
-    snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_index);
-    err = je_mallctl(cmd, NULL, NULL, (void *)&pHooks, sizeof(void *));
-    if (err) {
-        snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena_index);
-        (void)je_mallctl(cmd, NULL, 0, NULL, 0);
-        LOG_ERR("Could not setup extent_hooks for newly created arena.");
-        goto err_free_pool;
+    if (n_arenas > MALLOCX_ARENA_MAX) {
+        LOG_ERR("Number of arenas exceeds the limit.");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
    }

-    pool->arena_index = arena_index;
-    pool_by_arena_index[arena_index] = pool;
+    jemalloc_memory_pool_t *pool = umf_ba_global_alloc(
+        sizeof(*pool) + n_arenas * sizeof(*pool->arena_index));
+    if (!pool) {
+        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
+    }

+    pool->provider = provider;
+    pool->n_arenas = n_arenas;
+
+    size_t num_created = 0;
+    for (size_t i = 0; i < n_arenas; i++) {
+        unsigned arena_index;
+        err = je_mallctl("arenas.create", (void *)&arena_index, &unsigned_size,
+                         NULL, 0);
+        if (err) {
+            LOG_ERR("Could not create arena.");
+            goto err_cleanup;
+        }
+
+        pool->arena_index[num_created++] = arena_index;
+        if (arena_index >= MALLOCX_ARENA_MAX) {
+            LOG_ERR("Number of arenas exceeds the limit.");
+            goto err_cleanup;
+        }
+
+        pool_by_arena_index[arena_index] = pool;
+
+        // Setup extent_hooks for the newly created arena.
+        char cmd[64];
+        snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_index);
+        err = je_mallctl(cmd, NULL, NULL, (void *)&pHooks, sizeof(void *));
+        if (err) {
+            LOG_ERR("Could not setup extent_hooks for newly created arena.");
+            goto err_cleanup;
+        }
+    }
    *out_pool = (umf_memory_pool_handle_t)pool;

    VALGRIND_DO_CREATE_MEMPOOL(pool, 0, 0);

    return UMF_RESULT_SUCCESS;

-err_free_pool:
+err_cleanup:
+    // Destroy any arenas that were successfully created.
+    for (size_t i = 0; i < num_created; i++) {
+        char cmd[64];
+        unsigned arena = pool->arena_index[i];
+        snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena);
+        (void)je_mallctl(cmd, NULL, 0, NULL, 0);
+    }
    umf_ba_global_free(pool);
    return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC;
}

static void op_finalize(void *pool) {
    assert(pool);
    jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
-    char cmd[64];
-    snprintf(cmd, sizeof(cmd), "arena.%u.destroy", je_pool->arena_index);
-    (void)je_mallctl(cmd, NULL, 0, NULL, 0);
-    pool_by_arena_index[je_pool->arena_index] = NULL;
+    for (size_t i = 0; i < je_pool->n_arenas; i++) {
+        char cmd[64];
+        unsigned arena = je_pool->arena_index[i];
+        snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena);
+        (void)je_mallctl(cmd, NULL, 0, NULL, 0);
+    }
    umf_ba_global_free(je_pool);

    VALGRIND_DO_DESTROY_MEMPOOL(pool);
@@ -469,4 +541,33 @@ static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = {
umf_memory_pool_ops_t *umfJemallocPoolOps(void) {
    return &UMF_JEMALLOC_POOL_OPS;
}
+
+umf_result_t
+umfJemallocPoolParamsCreate(umf_jemalloc_pool_params_handle_t *hParams) {
+    umf_jemalloc_pool_params_t *params = umf_ba_global_alloc(sizeof(*params));
+    if (!params) {
+        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
+    }
+    memset(params, 0, sizeof(*params));
+    *hParams = params;
+    return UMF_RESULT_SUCCESS;
+}
+
+umf_result_t
+umfJemallocPoolParamsDestroy(umf_jemalloc_pool_params_handle_t hParams) {
+    umf_ba_global_free(hParams);
+    return UMF_RESULT_SUCCESS;
+}
+
+umf_result_t
+umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams,
+                                  size_t numArenas) {
+    if (!hParams) {
+        LOG_ERR("jemalloc pool params handle is NULL");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+    hParams->n_arenas = numArenas;
+    return UMF_RESULT_SUCCESS;
+}
+
#endif /* UMF_POOL_JEMALLOC_ENABLED */
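
For context, a minimal caller-side sketch of how the new parameter API is meant to be used, assuming the public UMF headers (umf/memory_pool.h, umf/pools/pool_jemalloc.h) and the usual umfPoolCreate(ops, provider, params, flags, out) entry point; the header paths, umfPoolCreate, and the create_jemalloc_pool helper are assumptions for illustration, not part of this patch.

#include <umf/memory_pool.h>
#include <umf/pools/pool_jemalloc.h>

// Hypothetical helper: build a jemalloc-backed UMF pool with n_arenas arenas
// on top of an existing memory provider.
static umf_memory_pool_handle_t
create_jemalloc_pool(umf_memory_provider_handle_t provider, size_t n_arenas) {
    umf_jemalloc_pool_params_handle_t params = NULL;
    if (umfJemallocPoolParamsCreate(&params) != UMF_RESULT_SUCCESS) {
        return NULL;
    }

    // Passing 0 keeps the default chosen by op_initialize
    // (4 * utils_get_num_cores() in this patch).
    if (umfJemallocPoolParamsSetNumArenas(params, n_arenas) !=
        UMF_RESULT_SUCCESS) {
        umfJemallocPoolParamsDestroy(params);
        return NULL;
    }

    umf_memory_pool_handle_t pool = NULL;
    umf_result_t ret =
        umfPoolCreate(umfJemallocPoolOps(), provider, params, 0, &pool);

    // op_initialize only reads n_arenas during creation, so the params
    // object can be released immediately afterwards.
    umfJemallocPoolParamsDestroy(params);

    return (ret == UMF_RESULT_SUCCESS) ? pool : NULL;
}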