diff --git a/compat/mimalloc/alloc.c b/compat/mimalloc/alloc.c
index b6cdca1882ecb7..c0ada97c90daf8 100644
--- a/compat/mimalloc/alloc.c
+++ b/compat/mimalloc/alloc.c
@@ -59,7 +59,6 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
   // zero the block? note: we need to zero the full block size (issue #63)
   if mi_unlikely(zero) {
     mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
-    mi_assert_internal(!mi_page_is_huge(page));
     #if MI_PADDING
     mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
     #endif
@@ -528,7 +527,7 @@ static std_new_handler_t mi_get_new_handler(void) {
 }
 #else
 // note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`.
-static std_new_handler_t mi_get_new_handler() {
+static std_new_handler_t mi_get_new_handler(void) {
   return NULL;
 }
 #endif
diff --git a/compat/mimalloc/mimalloc.h b/compat/mimalloc/mimalloc.h
index fcd19cc9bc2e15..6a926d1c802a65 100644
--- a/compat/mimalloc/mimalloc.h
+++ b/compat/mimalloc/mimalloc.h
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #ifndef MIMALLOC_H
 #define MIMALLOC_H
 
-#define MI_MALLOC_VERSION 226   // major + 2 digits minor
+#define MI_MALLOC_VERSION 227   // major + 2 digits minor
 
 // ------------------------------------------------------
 // Compiler specific attributes
diff --git a/compat/mimalloc/mimalloc/internal.h b/compat/mimalloc/mimalloc/internal.h
index 6845c9b5df3faf..e78d0fc06f2d14 100644
--- a/compat/mimalloc/mimalloc/internal.h
+++ b/compat/mimalloc/mimalloc/internal.h
@@ -1093,7 +1093,7 @@ static inline size_t mi_popcount(size_t x) {
 extern mi_decl_hidden bool _mi_cpu_has_fsrm;
 extern mi_decl_hidden bool _mi_cpu_has_erms;
 static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
-  if ((_mi_cpu_has_fsrm && n <= 128) || (_mi_cpu_has_erms && n > 128)) {
+  if (_mi_cpu_has_fsrm && n <= 127) { // || (_mi_cpu_has_erms && n > 128)) {
     __movsb((unsigned char*)dst, (const unsigned char*)src, n);
   }
   else {
@@ -1101,7 +1101,7 @@ static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
   }
 }
 static inline void _mi_memzero(void* dst, size_t n) {
-  if ((_mi_cpu_has_fsrm && n <= 128) || (_mi_cpu_has_erms && n > 128)) {
+  if (_mi_cpu_has_fsrm && n <= 127) { // || (_mi_cpu_has_erms && n > 128)) {
     __stosb((unsigned char*)dst, 0, n);
   }
   else {
diff --git a/compat/mimalloc/mimalloc/types.h b/compat/mimalloc/mimalloc/types.h
index f52d37a82b19b6..e778e8788908fc 100644
--- a/compat/mimalloc/mimalloc/types.h
+++ b/compat/mimalloc/mimalloc/types.h
@@ -480,6 +480,7 @@ typedef struct mi_segment_s {
   struct mi_segment_s* next;   // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
   bool was_reclaimed;          // true if it was reclaimed (used to limit on-free reclamation)
   bool dont_free;              // can be temporarily true to ensure the segment is not freed
+  bool free_is_zero;           // if free spans are zero
 
   size_t abandoned;            // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
   size_t abandoned_visits;     // count how often this segment is visited during abondoned reclamation (to force reclaim if it takes too long)
diff --git a/compat/mimalloc/page.c b/compat/mimalloc/page.c
index 34dae9f5e473cb..aeea9eeaa85e0c 100644
--- a/compat/mimalloc/page.c
+++ b/compat/mimalloc/page.c
@@ -1031,17 +1031,9 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_page_block_size(page) >= size);
 
   // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
-  void* p;
-  if mi_unlikely(zero && mi_page_is_huge(page)) {
-    // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
-    p = _mi_page_malloc_zero(heap, page, size, false, usable);
-    mi_assert_internal(p != NULL);
-    _mi_memzero_aligned(p, mi_page_usable_block_size(page));
-  }
-  else {
-    p = _mi_page_malloc_zero(heap, page, size, zero, usable);
-    mi_assert_internal(p != NULL);
-  }
+  void* const p = _mi_page_malloc_zero(heap, page, size, zero, usable);
+  mi_assert_internal(p != NULL);
+
   // move singleton pages to the full queue
   if (page->reserved == page->used) {
     mi_page_to_full(page, mi_page_queue_of(page));
diff --git a/compat/mimalloc/segment.c b/compat/mimalloc/segment.c
index 6f398822dfb421..f440dc01a1db40 100644
--- a/compat/mimalloc/segment.c
+++ b/compat/mimalloc/segment.c
@@ -773,6 +773,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
 
   // and initialize the page
   page->is_committed = true;
+  page->is_zero_init = segment->free_is_zero;
   page->is_huge = (segment->kind == MI_SEGMENT_HUGE);
   segment->used++;
   return page;
@@ -882,6 +883,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
   segment->subproc = tld->subproc;
   segment->commit_mask = commit_mask;
   segment->purge_expire = 0;
+  segment->free_is_zero = memid.initially_zero;
   mi_commit_mask_create_empty(&segment->purge_mask);
   mi_segments_track_size((long)(segment_size), tld);
 
@@ -1024,7 +1026,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   _mi_stat_decrease(&tld->stats->page_committed, inuse);
   _mi_stat_decrease(&tld->stats->pages, 1);
   _mi_stat_decrease(&tld->stats->page_bins[_mi_page_stats_bin(page)], 1);
- 
+
   // reset the page memory to reduce memory pressure?
   if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) {
     size_t psize;
@@ -1043,6 +1045,8 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   // and free it
   mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld);
   segment->used--;
+  segment->free_is_zero = false;
+
   // cannot assert segment valid as it is called during reclaim
   // mi_assert_expensive(mi_segment_is_valid(segment, tld));
   return slice;
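Note on the `free_is_zero` additions: they thread a single fact from the OS allocation down to individual pages. A segment whose memory arrived zero-initialized (`memid.initially_zero`) remembers that, pages carved from its free spans inherit it as `is_zero_init`, and the flag is cleared as soon as a freed page makes the free spans dirty again; zeroing allocations can then skip a redundant memset. Below is a minimal, self-contained sketch of that pattern; the toy types and helpers are illustrative only, not mimalloc's actual API.

/* toy model of the zero-state tracking added in this patch (illustrative) */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef struct toy_segment_s {
  bool free_is_zero;            /* free spans still hold OS-provided zeros */
} toy_segment_t;

typedef struct toy_page_s {
  toy_segment_t* segment;
  bool is_zero_init;            /* page memory was zero when carved out */
} toy_page_t;

/* carving a page out of a segment: inherit the segment's zero state
   (mirrors the mi_segment_span_allocate hunk above) */
static void toy_page_init(toy_page_t* page, toy_segment_t* segment) {
  page->segment = segment;
  page->is_zero_init = segment->free_is_zero;
}

/* returning a page's span to the segment: the span now holds stale user
   data, so the segment can no longer claim its free spans are zero
   (mirrors the mi_segment_page_clear hunk above) */
static void toy_page_free(toy_page_t* page) {
  page->segment->free_is_zero = false;
}

/* a zeroing allocation only pays for memset when the block is not
   already known to be zero */
static void* toy_alloc_zero(toy_page_t* page, void* block, size_t size) {
  if (!page->is_zero_init) memset(block, 0, size);
  return block;
}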
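Note on the `_mi_memcpy`/`_mi_memzero` change: it narrows the `rep movsb`/`rep stosb` fast path to short operations (`n <= 127`) on FSRM-capable CPUs and comments out the ERMS branch for larger sizes. For reference, both feature bits live in CPUID leaf 7, sub-leaf 0: ERMS ("enhanced rep movsb/stosb") is EBX bit 9 and FSRM ("fast short rep movsb") is EDX bit 4. A sketch of how such flags can be detected with GCC/Clang intrinsics follows; the helper name is hypothetical, and mimalloc performs its own detection during initialization (the `__movsb`/`__stosb` calls in the diff itself are MSVC intrinsics).

/* detect FSRM/ERMS on x86-64 with GCC/Clang (illustrative only) */
#include <stdbool.h>
#if defined(__x86_64__) && (defined(__GNUC__) || defined(__clang__))
#include <cpuid.h>

static bool cpu_has_fsrm = false;  /* fast short rep movsb */
static bool cpu_has_erms = false;  /* enhanced rep movsb/stosb */

static void detect_rep_string_features(void) {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  /* CPUID leaf 7, sub-leaf 0: structured extended feature flags */
  if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
    cpu_has_erms = (ebx & (1u << 9)) != 0;
    cpu_has_fsrm = (edx & (1u << 4)) != 0;
  }
}
#endif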