base/timing.jl (24 changes: 13 additions & 11 deletions)
```diff
@@ -113,7 +113,8 @@ end
 @static if Base.USING_STOCK_GC
 # must be kept in sync with `src/gc-stock.h`
 const FULL_SWEEP_REASONS = [:FULL_SWEEP_REASON_SWEEP_ALWAYS_FULL, :FULL_SWEEP_REASON_FORCED_FULL_SWEEP,
-                            :FULL_SWEEP_REASON_USER_MAX_EXCEEDED, :FULL_SWEEP_REASON_LARGE_PROMOTION_RATE]
+                            :FULL_SWEEP_REASON_ALLOCATION_INTERVAL_ABOVE_MAXMEM, :FULL_SWEEP_REASON_LIVE_BYTES_ABOVE_MAX_TOTAL_MEMORY,
+                            :FULL_SWEEP_REASON_LARGE_INTERGEN_FRONTIER]
 end
```
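Since the C side reads these counters back by position, the order of `FULL_SWEEP_REASONS` matters as much as its contents. A quick sanity-check sketch (assuming a stock-GC build of this branch; the `@assert` over the non-exported `Base.FULL_SWEEP_REASONS` is for illustration only):

```julia
# Sketch, stock-GC builds only: every documented reason symbol should come back
# as a key of the runtime dictionary, which is filled by index from the C counters.
@static if Base.USING_STOCK_GC
    @assert keys(Base.full_sweep_reasons()) == Set(Base.FULL_SWEEP_REASONS)
end
```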

"""
Expand All @@ -124,21 +125,22 @@ Return a dictionary of the number of times each full sweep reason has occurred.
The reasons are:
- `:FULL_SWEEP_REASON_SWEEP_ALWAYS_FULL`: Full sweep was caused due to `always_full` being set in the GC debug environment
- `:FULL_SWEEP_REASON_FORCED_FULL_SWEEP`: Full sweep was forced by `GC.gc(true)`
- `:FULL_SWEEP_REASON_USER_MAX_EXCEEDED`: Full sweep was forced due to the system reaching the heap soft size limit
- `:FULL_SWEEP_REASON_LARGE_PROMOTION_RATE`: Full sweep was forced by a large promotion rate across GC generations
- `:FULL_SWEEP_REASON_ALLOCATION_INTERVAL_ABOVE_MAXMEM`: Full sweep was forced by the allocation interval being above the total
memory in the machine (as returned by LibUV) divided by the number of mutator threads
- `:FULL_SWEEP_REASON_LIVE_BYTES_ABOVE_MAX_TOTAL_MEMORY`: Full sweep was caused due to live bytes being above the
soft heap limit size (which is either automatically computed at initialization based on the total memory provided by LibUV,
or set by the user via `--heap-size-hint`)
- `:FULL_SWEEP_REASON_LARGE_INTERGEN_FRONTIER`: Full sweep was forced by the intergenerational frontier being too large
(i.e. too many pointers in the remembered set)

Note that the set of reasons is not guaranteed to be stable across minor versions of Julia.
"""
function full_sweep_reasons()
reason = cglobal(:jl_full_sweep_reasons, UInt64)
reasons_as_array = Base.unsafe_wrap(Vector{UInt64}, reason, length(FULL_SWEEP_REASONS), own=false)
d = Dict{Symbol, Int64}()
# populate the dictionary according to the reasons above for the stock GC
# otherwise return an empty dictionary for now
@static if Base.USING_STOCK_GC
reason = cglobal(:jl_full_sweep_reasons, UInt64)
reasons_as_array = Base.unsafe_wrap(Vector{UInt64}, reason, length(FULL_SWEEP_REASONS), own=false)
for (i, r) in enumerate(FULL_SWEEP_REASONS)
d[r] = reasons_as_array[i]
end
for (i, r) in enumerate(FULL_SWEEP_REASONS)
d[r] = reasons_as_array[i]
end
return d
end
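As a usage sketch for reviewers (assuming a stock-GC build of this branch, where the dictionary is populated): `GC.gc(true)` forces a full collection, so the corresponding counter should advance.

```julia
# Sketch: watch the forced-full-sweep counter move across GC.gc(true).
before = Base.full_sweep_reasons()
GC.gc(true)  # forced full collection, tallied under :FULL_SWEEP_REASON_FORCED_FULL_SWEEP
after = Base.full_sweep_reasons()
@assert after[:FULL_SWEEP_REASON_FORCED_FULL_SWEEP] > before[:FULL_SWEEP_REASON_FORCED_FULL_SWEEP]
```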
src/gc-debug.c (16 changes: 3 additions & 13 deletions)
```diff
@@ -1103,25 +1103,15 @@ void gc_count_pool(void)
     jl_safe_printf("************************\n");
 }
 
-void _report_gc_finished(uint64_t pause, uint64_t freed, int full, int recollect, int64_t live_bytes) JL_NOTSAFEPOINT {
+void _report_gc_finished(uint64_t pause, uint64_t freed, int full, int recollect) JL_NOTSAFEPOINT {
     if (!gc_logging_enabled) {
         return;
     }
-    jl_safe_printf("\nGC: pause %.2fms. collected %fMB. %s %s\n",
-                   pause/1e6, freed/(double)(1<<20),
+    jl_safe_printf("GC: pause %.2fms. collected %fMB. %s %s\n",
+                   pause/1e6, freed/1e6,
                    full ? "full" : "incr",
                    recollect ? "recollect" : ""
     );
-
-    jl_safe_printf("Heap stats: bytes_mapped %.2f MB, bytes_resident %.2f MB,\nheap_size %.2f MB, heap_target %.2f MB, Fragmentation %.3f\n",
-                   jl_atomic_load_relaxed(&gc_heap_stats.bytes_mapped)/(double)(1<<20),
-                   jl_atomic_load_relaxed(&gc_heap_stats.bytes_resident)/(double)(1<<20),
-                   // live_bytes/(double)(1<<20), live byes tracking is not accurate.
-                   jl_atomic_load_relaxed(&gc_heap_stats.heap_size)/(double)(1<<20),
-                   jl_atomic_load_relaxed(&gc_heap_stats.heap_target)/(double)(1<<20),
-                   (double)live_bytes/(double)jl_atomic_load_relaxed(&gc_heap_stats.heap_size)
-    );
-    // Should fragmentation use bytes_resident instead of heap_size?
 }
 
 #ifdef __cplusplus
```
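With the heap-stats block gone, each collection now logs a single line, and `collected` switches from MiB (`freed/(double)(1<<20)`) to decimal MB (`freed/1e6`). A sketch of exercising the new output from the Julia side, via the existing `GC.enable_logging` switch for `gc_logging_enabled` (exact numbers will vary):

```julia
# Sketch: toggle GC logging and force a collection to see the one-line format,
# e.g. "GC: pause 12.34ms. collected 5.678901MB. full " on stderr.
GC.enable_logging(true)
GC.gc(true)
GC.enable_logging(false)
```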
src/gc-pages.c (17 changes: 8 additions & 9 deletions)
```diff
@@ -27,10 +27,12 @@ JL_DLLEXPORT uint64_t jl_get_pg_size(void)
 #define MIN_BLOCK_PG_ALLOC (1) // 16 KB
 
 static int block_pg_cnt = DEFAULT_BLOCK_PG_ALLOC;
+static _Atomic(uint64_t) current_pg_count = 0;
 
 // Julia allocates large blocks (64M) with mmap. These are never
 // unmapped but the underlying physical memory may be released
 // with calls to madvise(MADV_DONTNEED).
+static uint64_t poolmem_blocks_allocated = 0;
 static uint64_t poolmem_blocks_allocated_total = 0;
 
 JL_DLLEXPORT uint64_t jl_poolmem_blocks_allocated_total(void)
@@ -40,14 +42,12 @@ JL_DLLEXPORT uint64_t jl_poolmem_blocks_allocated_total(void)
 
 JL_DLLEXPORT uint64_t jl_poolmem_bytes_allocated(void)
 {
-    return jl_atomic_load_relaxed(&gc_heap_stats.bytes_resident);
+    return poolmem_blocks_allocated;
 }
 
 JL_DLLEXPORT uint64_t jl_current_pg_count(void)
 {
-    assert(jl_page_size == GC_PAGE_SZ && "RAI fork of Julia should be running on platforms for which jl_page_size == GC_PAGE_SZ");
-    size_t nb = jl_atomic_load_relaxed(&gc_heap_stats.bytes_resident);
-    return nb / GC_PAGE_SZ; // exact division
+    return (uint64_t)jl_atomic_load(&current_pg_count);
 }
 
 void jl_gc_init_page(void)
@@ -77,6 +77,8 @@ char *jl_gc_try_alloc_pages_(int pg_cnt) JL_NOTSAFEPOINT
                     MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (mem == MAP_FAILED)
         return NULL;
+    poolmem_blocks_allocated += pages_sz;
+    poolmem_blocks_allocated_total++;
 
 #ifdef MADV_NOHUGEPAGE
     madvise(mem, pages_sz, MADV_NOHUGEPAGE);
@@ -87,9 +89,6 @@
     // round data pointer up to the nearest gc_page_data-aligned
     // boundary if mmap didn't already do so.
     mem = (char*)gc_page_data(mem + GC_PAGE_SZ - 1);
-    jl_atomic_fetch_add_relaxed(&gc_heap_stats.bytes_mapped, pages_sz);
-    jl_atomic_fetch_add_relaxed(&gc_heap_stats.bytes_resident, pages_sz);
-    poolmem_blocks_allocated_total++; // RAI-specific
     return mem;
 }
@@ -153,7 +152,6 @@ NOINLINE jl_gc_pagemeta_t *jl_gc_alloc_page(void) JL_NOTSAFEPOINT
     // try to get page from `pool_freed`
     meta = pop_lf_back(&global_page_pool_freed);
     if (meta != NULL) {
-        jl_atomic_fetch_add_relaxed(&gc_heap_stats.bytes_resident, GC_PAGE_SZ);
         gc_alloc_map_set(meta->data, GC_PAGE_ALLOCATED);
         goto exit;
     }
@@ -187,6 +185,7 @@
     SetLastError(last_error);
 #endif
     errno = last_errno;
+    jl_atomic_fetch_add(&current_pg_count, 1);
     return meta;
 }
@@ -227,7 +226,7 @@ void jl_gc_free_page(jl_gc_pagemeta_t *pg) JL_NOTSAFEPOINT
     madvise(p, decommit_size, MADV_DONTNEED);
 #endif
     msan_unpoison(p, decommit_size);
-    jl_atomic_fetch_add_relaxed(&gc_heap_stats.bytes_resident, -decommit_size);
+    jl_atomic_fetch_add(&current_pg_count, -1);
 }
 
 #ifdef __cplusplus
```
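Taken together: `current_pg_count` tracks live GC pages (incremented in `jl_gc_alloc_page`, decremented in `jl_gc_free_page`), while `poolmem_blocks_allocated` accumulates the bytes mmapped for pool blocks, which are never unmapped. A sketch of reading the three exports from the Julia side (the symbols are the C functions touched in this diff; requires a build of this branch):

```julia
# Sketch: inspect the counters now maintained in src/gc-pages.c via their exports.
pages      = ccall(:jl_current_pg_count, UInt64, ())               # live page count
pool_bytes = ccall(:jl_poolmem_bytes_allocated, UInt64, ())        # bytes mmapped for pool blocks
blocks     = ccall(:jl_poolmem_blocks_allocated_total, UInt64, ()) # blocks ever allocated
println("GC pool: $pages pages live, $blocks blocks, $(pool_bytes / 2^20) MiB mapped")
```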