14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
17#include "ruby/internal/config.h"
24#define sighandler_t ruby_sighandler_t
31#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
32# include "wasm/setjmp.h"
33# include "wasm/machine.h"
41#ifndef HAVE_MALLOC_USABLE_SIZE
43# define HAVE_MALLOC_USABLE_SIZE
44# define malloc_usable_size(a) _msize(a)
45# elif defined HAVE_MALLOC_SIZE
46# define HAVE_MALLOC_USABLE_SIZE
47# define malloc_usable_size(a) malloc_size(a)
51#ifdef HAVE_MALLOC_USABLE_SIZE
52# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
54# elif defined(HAVE_MALLOC_H)
56# elif defined(HAVE_MALLOC_NP_H)
57# include <malloc_np.h>
58# elif defined(HAVE_MALLOC_MALLOC_H)
59# include <malloc/malloc.h>
63#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
73#ifdef HAVE_SYS_RESOURCE_H
74# include <sys/resource.h>
77#if defined _WIN32 || defined __CYGWIN__
79#elif defined(HAVE_POSIX_MEMALIGN)
80#elif defined(HAVE_MEMALIGN)
87#include <emscripten.h>
90#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
91# include <mach/task.h>
92# include <mach/mach_init.h>
93# include <mach/mach_port.h>
98#include "debug_counter.h"
99#include "eval_intern.h"
103#include "internal/class.h"
104#include "internal/complex.h"
105#include "internal/cont.h"
106#include "internal/error.h"
107#include "internal/eval.h"
108#include "internal/gc.h"
109#include "internal/hash.h"
110#include "internal/imemo.h"
111#include "internal/io.h"
112#include "internal/numeric.h"
113#include "internal/object.h"
114#include "internal/proc.h"
115#include "internal/rational.h"
116#include "internal/sanitizers.h"
117#include "internal/struct.h"
118#include "internal/symbol.h"
119#include "internal/thread.h"
120#include "internal/variable.h"
121#include "internal/warnings.h"
131#include "ruby_assert.h"
132#include "ruby_atomic.h"
134#include "transient_heap.h"
137#include "vm_callinfo.h"
138#include "ractor_core.h"
143#define rb_setjmp(env) RUBY_SETJMP(env)
144#define rb_jmp_buf rb_jmpbuf_t
145#undef rb_data_object_wrap
147#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
148#define MAP_ANONYMOUS MAP_ANON
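/*
 * Overflow-checked size arithmetic. Each helper returns an
 * rbimpl_size_mul_overflow_tag whose `left` member is set when the
 * computation overflowed and whose `right` member holds the (possibly
 * wrapped) result, e.g. size_mul_add_overflow(x, y, z) computes x * y + z.
 */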
151static inline struct rbimpl_size_mul_overflow_tag
152size_add_overflow(size_t x, size_t y)
158#elif __has_builtin(__builtin_add_overflow)
159 p = __builtin_add_overflow(x, y, &z);
161#elif defined(DSIZE_T)
    return (struct rbimpl_size_mul_overflow_tag) { p, z, };
176static inline struct rbimpl_size_mul_overflow_tag
177size_mul_add_overflow(size_t x, size_t y, size_t z)
179 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
180 struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
184static inline struct rbimpl_size_mul_overflow_tag
185size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w)
187 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
188 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
189 struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
size_mul_or_raise(size_t x, size_t y, VALUE exc)
198 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
199 if (LIKELY(!t.left)) {
202 else if (rb_during_gc()) {
208 "integer overflow: %"PRIuSIZE
             x, y, (size_t)SIZE_MAX);
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
218 return size_mul_or_raise(x, y, exc);
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
224 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
225 if (LIKELY(!t.left)) {
228 else if (rb_during_gc()) {
234 "integer overflow: %"PRIuSIZE
             x, y, z, (size_t)SIZE_MAX);
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
245 return size_mul_add_or_raise(x, y, z, exc);
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
251 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
252 if (LIKELY(!t.left)) {
255 else if (rb_during_gc()) {
261 "integer overflow: %"PRIdSIZE
             x, y, z, w, (size_t)SIZE_MAX);
270#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
272volatile VALUE rb_gc_guarded_val;
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
276 rb_gc_guarded_val = val;
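/*
 * Default GC tuning parameters. The GC_* constants below are only
 * compile-time defaults: they seed gc_params at boot and can typically be
 * overridden through the corresponding RUBY_GC_* environment variables
 * (e.g. RUBY_GC_HEAP_INIT_SLOTS, RUBY_GC_MALLOC_LIMIT_MIN).
 */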
282#ifndef GC_HEAP_INIT_SLOTS
283#define GC_HEAP_INIT_SLOTS 10000
285#ifndef GC_HEAP_FREE_SLOTS
286#define GC_HEAP_FREE_SLOTS 4096
288#ifndef GC_HEAP_GROWTH_FACTOR
289#define GC_HEAP_GROWTH_FACTOR 1.8
291#ifndef GC_HEAP_GROWTH_MAX_SLOTS
292#define GC_HEAP_GROWTH_MAX_SLOTS 0
294#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
295#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
298#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
299#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
301#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
302#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
304#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
305#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
308#ifndef GC_MALLOC_LIMIT_MIN
309#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 )
311#ifndef GC_MALLOC_LIMIT_MAX
312#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 )
314#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
315#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
318#ifndef GC_OLDMALLOC_LIMIT_MIN
319#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 )
321#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
322#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
324#ifndef GC_OLDMALLOC_LIMIT_MAX
325#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 )
328#ifndef PRINT_MEASURE_LINE
329#define PRINT_MEASURE_LINE 0
331#ifndef PRINT_ENTER_EXIT_TICK
332#define PRINT_ENTER_EXIT_TICK 0
334#ifndef PRINT_ROOT_TICKS
335#define PRINT_ROOT_TICKS 0
338#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
342 size_t heap_init_slots;
343 size_t heap_free_slots;
344 double growth_factor;
345 size_t growth_max_slots;
347 double heap_free_slots_min_ratio;
348 double heap_free_slots_goal_ratio;
349 double heap_free_slots_max_ratio;
350 double oldobject_limit_factor;
352 size_t malloc_limit_min;
353 size_t malloc_limit_max;
354 double malloc_limit_growth_factor;
356 size_t oldmalloc_limit_min;
357 size_t oldmalloc_limit_max;
358 double oldmalloc_limit_growth_factor;
366 GC_HEAP_GROWTH_FACTOR,
367 GC_HEAP_GROWTH_MAX_SLOTS,
369 GC_HEAP_FREE_SLOTS_MIN_RATIO,
370 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
371 GC_HEAP_FREE_SLOTS_MAX_RATIO,
372 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
376 GC_MALLOC_LIMIT_GROWTH_FACTOR,
378 GC_OLDMALLOC_LIMIT_MIN,
379 GC_OLDMALLOC_LIMIT_MAX,
380 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
401#define RGENGC_DEBUG -1
403#define RGENGC_DEBUG 0
406#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
407# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
408#elif defined(HAVE_VA_ARGS_MACRO)
409# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
411# define RGENGC_DEBUG_ENABLED(level) 0
413int ruby_rgengc_debug;
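/*
 * RGENGC_DEBUG controls generational-GC debug output. A negative value
 * compiles in messages up to level -(RGENGC_DEBUG) and additionally gates
 * them on the runtime variable ruby_rgengc_debug above; a non-negative
 * value fixes the enabled level at compile time.
 */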
423#ifndef RGENGC_CHECK_MODE
424#define RGENGC_CHECK_MODE 0
428#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
436#ifndef RGENGC_OLD_NEWOBJ_CHECK
437#define RGENGC_OLD_NEWOBJ_CHECK 0
445#ifndef RGENGC_PROFILE
446#define RGENGC_PROFILE 0
455#ifndef RGENGC_ESTIMATE_OLDMALLOC
456#define RGENGC_ESTIMATE_OLDMALLOC 1
462#ifndef RGENGC_FORCE_MAJOR_GC
463#define RGENGC_FORCE_MAJOR_GC 0
466#ifndef GC_PROFILE_MORE_DETAIL
467#define GC_PROFILE_MORE_DETAIL 0
469#ifndef GC_PROFILE_DETAIL_MEMORY
470#define GC_PROFILE_DETAIL_MEMORY 0
472#ifndef GC_ENABLE_INCREMENTAL_MARK
473#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
475#ifndef GC_ENABLE_LAZY_SWEEP
476#define GC_ENABLE_LAZY_SWEEP 1
478#ifndef CALC_EXACT_MALLOC_SIZE
479#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
481#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
482#ifndef MALLOC_ALLOCATED_SIZE
483#define MALLOC_ALLOCATED_SIZE 0
486#define MALLOC_ALLOCATED_SIZE 0
488#ifndef MALLOC_ALLOCATED_SIZE_CHECK
489#define MALLOC_ALLOCATED_SIZE_CHECK 0
492#ifndef GC_DEBUG_STRESS_TO_CLASS
493#define GC_DEBUG_STRESS_TO_CLASS 0
496#ifndef RGENGC_OBJ_INFO
497#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
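/*
 * Reason bits recorded for each GC run. The GPR_FLAG_MAJOR_BY_* bits say why
 * a major (full-mark) GC was triggered, the next group records what provoked
 * the collection (object allocation, malloc limit, an explicit request, the
 * C API, or GC stress), and the remaining bits describe how the run is
 * performed (immediate sweep/mark, full mark, compaction).
 */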
501 GPR_FLAG_NONE = 0x000,
503 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
504 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
505 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
506 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
507#if RGENGC_ESTIMATE_OLDMALLOC
508 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
510 GPR_FLAG_MAJOR_MASK = 0x0ff,
513 GPR_FLAG_NEWOBJ = 0x100,
514 GPR_FLAG_MALLOC = 0x200,
515 GPR_FLAG_METHOD = 0x400,
516 GPR_FLAG_CAPI = 0x800,
517 GPR_FLAG_STRESS = 0x1000,
520 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
521 GPR_FLAG_HAVE_FINALIZE = 0x4000,
522 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
523 GPR_FLAG_FULL_MARK = 0x10000,
524 GPR_FLAG_COMPACT = 0x20000,
527 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
528 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
529} gc_profile_record_flag;
535 double gc_invoke_time;
537 size_t heap_total_objects;
538 size_t heap_use_size;
539 size_t heap_total_size;
540 size_t moved_objects;
542#if GC_PROFILE_MORE_DETAIL
544 double gc_sweep_time;
546 size_t heap_use_pages;
547 size_t heap_live_objects;
548 size_t heap_free_objects;
550 size_t allocate_increase;
551 size_t allocate_limit;
554 size_t removing_objects;
555 size_t empty_objects;
556#if GC_PROFILE_DETAIL_MEMORY
562#if MALLOC_ALLOCATED_SIZE
563 size_t allocated_size;
566#if RGENGC_PROFILE > 0
568 size_t remembered_normal_objects;
569 size_t remembered_shady_objects;
577 shape_id_t original_shape_id;
580#define RMOVED(obj) ((struct RMoved *)(obj))
631 uint32_t _ractor_belonging_id;
640# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
642# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
644# define RVALUE_OVERHEAD 0
650typedef uintptr_t bits_t;
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
655#define popcount_bits rb_popcount_intptr
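/*
 * The mark stack lives outside the machine stack as a linked list of
 * fixed-size chunks holding STACK_CHUNK_SIZE VALUEs each; drained chunks are
 * kept in a small cache so marking can reuse them without another malloc.
 */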
672#define STACK_CHUNK_SIZE 500
675 VALUE data[STACK_CHUNK_SIZE];
685 size_t unused_cache_size;
688#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
689#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
693 struct ccan_list_head pages;
696 uintptr_t compact_cursor_index;
697#if GC_ENABLE_INCREMENTAL_MARK
707 size_t allocatable_pages;
710 size_t total_allocated_pages;
711 size_t total_freed_pages;
712 size_t force_major_gc_count;
735#if MALLOC_ALLOCATED_SIZE
736 size_t allocated_size;
743 unsigned int mode : 2;
744 unsigned int immediate_sweep : 1;
745 unsigned int dont_gc : 1;
746 unsigned int dont_incremental : 1;
747 unsigned int during_gc : 1;
748 unsigned int during_compacting : 1;
749 unsigned int gc_stressful: 1;
750 unsigned int has_hook: 1;
751 unsigned int during_minor_gc : 1;
752#if GC_ENABLE_INCREMENTAL_MARK
753 unsigned int during_incremental_marking : 1;
755 unsigned int measure_gc : 1;
759 size_t total_allocated_objects;
760 VALUE next_object_id;
773 size_t allocated_pages;
774 size_t allocatable_pages;
775 size_t sorted_length;
777 size_t freeable_pages;
781 VALUE deferred_final;
788 unsigned int latest_gc_info;
794#if GC_PROFILE_MORE_DETAIL
799 size_t minor_gc_count;
800 size_t major_gc_count;
801 size_t compact_count;
802 size_t read_barrier_faults;
803#if RGENGC_PROFILE > 0
804 size_t total_generated_normal_object_count;
805 size_t total_generated_shady_object_count;
806 size_t total_shade_operation_count;
807 size_t total_promoted_count;
808 size_t total_remembered_normal_object_count;
809 size_t total_remembered_shady_object_count;
811#if RGENGC_PROFILE >= 2
    size_t generated_normal_object_count_types[RUBY_T_MASK];
    size_t generated_shady_object_count_types[RUBY_T_MASK];
    size_t remembered_normal_object_count_types[RUBY_T_MASK];
    size_t remembered_shady_object_count_types[RUBY_T_MASK];
822 double gc_sweep_start_time;
823 size_t total_allocated_objects_at_gc_start;
824 size_t heap_used_at_gc_start;
828 size_t total_freed_objects;
829 uint64_t total_time_ns;
834 VALUE gc_stress_mode;
839 size_t last_major_gc;
840 size_t uncollectible_wb_unprotected_objects;
841 size_t uncollectible_wb_unprotected_objects_limit;
843 size_t old_objects_limit;
845#if RGENGC_ESTIMATE_OLDMALLOC
846 size_t oldmalloc_increase;
847 size_t oldmalloc_increase_limit;
850#if RGENGC_CHECK_MODE >= 2
    size_t considered_count_table[T_MASK];
    size_t moved_count_table[T_MASK];
    size_t moved_up_count_table[T_MASK];
    size_t moved_down_count_table[T_MASK];
864#if GC_ENABLE_INCREMENTAL_MARK
874#if GC_DEBUG_STRESS_TO_CLASS
875 VALUE stress_to_class;
880#ifndef HEAP_PAGE_ALIGN_LOG
882#define HEAP_PAGE_ALIGN_LOG 16
885#define BASE_SLOT_SIZE sizeof(RVALUE)
887#define CEILDIV(i, mod) roomof(i, mod)
889 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
890 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
891 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
893 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
894 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
896#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
897#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
899#if GC_ENABLE_INCREMENTAL_MARK && !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
900# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
903#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
911#elif defined(__wasm__)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
917#elif HAVE_CONST_PAGE_SIZE
919static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
921#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
925#elif defined(PAGE_SIZE)
927# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
929#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
931# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
938#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
940# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
942static bool heap_page_alloc_use_mmap;
951 unsigned int before_sweep : 1;
952 unsigned int has_remembered_objects : 1;
953 unsigned int has_uncollectible_shady_objects : 1;
954 unsigned int in_tomb : 1;
962 struct ccan_list_node page_node;
964 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
966 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
967 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
968 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
971 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
asan_lock_freelist(struct heap_page *page)
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
asan_unlock_freelist(struct heap_page *page)
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
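/*
 * Heap pages are HEAP_PAGE_ALIGN-aligned, so the page body containing an
 * object can be recovered by masking the low bits of its address
 * (GET_PAGE_BODY), and the slot/bitmap index follows from the offset inside
 * the page. For example, for an RVALUE at address p:
 *
 *   struct heap_page *page = GET_HEAP_PAGE(p);    // page metadata
 *   bits_t *bits = GET_HEAP_MARK_BITS(p);         // its mark bitmap
 *   int marked = MARKED_IN_BITMAP(bits, p) != 0;  // test the mark bit
 */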
992#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
993#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
994#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
996#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
997#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
998#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
999#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
1002#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
1003#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
1004#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
1007#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
1008#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
1009#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
1010#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
1011#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
1013#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
1016#define rb_objspace (*rb_objspace_of(GET_VM()))
1017#define rb_objspace_of(vm) ((vm)->objspace)
1019#define ruby_initial_gc_stress gc_params.gc_stress
1021VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
1023#define malloc_limit objspace->malloc_params.limit
1024#define malloc_increase objspace->malloc_params.increase
1025#define malloc_allocated_size objspace->malloc_params.allocated_size
1026#define heap_pages_sorted objspace->heap_pages.sorted
1027#define heap_allocated_pages objspace->heap_pages.allocated_pages
1028#define heap_pages_sorted_length objspace->heap_pages.sorted_length
1029#define heap_pages_lomem objspace->heap_pages.range[0]
1030#define heap_pages_himem objspace->heap_pages.range[1]
1031#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
1032#define heap_pages_final_slots objspace->heap_pages.final_slots
1033#define heap_pages_deferred_final objspace->heap_pages.deferred_final
1034#define size_pools objspace->size_pools
1035#define during_gc objspace->flags.during_gc
1036#define finalizing objspace->atomic_flags.finalizing
1037#define finalizer_table objspace->finalizer_table
1038#define global_list objspace->global_list
1039#define ruby_gc_stressful objspace->flags.gc_stressful
1040#define ruby_gc_stress_mode objspace->gc_stress_mode
1041#if GC_DEBUG_STRESS_TO_CLASS
1042#define stress_to_class objspace->stress_to_class
1044#define stress_to_class 0
#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)b, __FILE__, __LINE__), objspace->flags.dont_gc = (b))
#define dont_gc_val() (objspace->flags.dont_gc)
1053#define dont_gc_on() (objspace->flags.dont_gc = 1)
1054#define dont_gc_off() (objspace->flags.dont_gc = 0)
1055#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
1056#define dont_gc_val() (objspace->flags.dont_gc)
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
1062#if RGENGC_CHECK_MODE > 0
1065 case gc_mode_marking:
1066 case gc_mode_sweeping:
1067 case gc_mode_compacting:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1080 if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1092 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1102 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1112 count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1122 count += size_pools[i].allocatable_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1133 int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
1134 count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1145 count += size_pool->total_allocated_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1156 count += size_pool->total_freed_pages;
1161#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1162#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
1164#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1165#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1166#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1167#if GC_ENABLE_INCREMENTAL_MARK
1168#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1170#define is_incremental_marking(objspace) FALSE
1172#if GC_ENABLE_INCREMENTAL_MARK
1173#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1175#define will_be_incremental_marking(objspace) FALSE
1177#if GC_ENABLE_INCREMENTAL_MARK
1178#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
1180#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1182#if SIZEOF_LONG == SIZEOF_VOIDP
1183# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
1184# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
1185#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1186# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
1187# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1188 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1190# error not supported
1193#define RANY(o) ((RVALUE*)(o))
    void (*dfree)(void *);
1202#define RZOMBIE(o) ((struct RZombie *)(o))
1204#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1206#if RUBY_MARK_FREE_DEBUG
1207int ruby_gc_debug_indent = 0;
1210int ruby_disable_gc = 0;
1211int ruby_enable_autocompact = 0;
void rb_iseq_mark(const rb_iseq_t *iseq);
void rb_iseq_update_references(rb_iseq_t *iseq);
void rb_iseq_free(const rb_iseq_t *iseq);
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_vm_update_references(void *ptr);
void rb_gcdebug_print_obj_condition(VALUE obj);
NORETURN(static void *gc_vraise(void *ptr));
NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
NORETURN(static void negative_size_allocation_error(const char *));
static int garbage_collect(rb_objspace_t *, unsigned int reason);
static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1236enum gc_enter_event {
1237 gc_enter_event_start,
1238 gc_enter_event_mark_continue,
1239 gc_enter_event_sweep_continue,
1240 gc_enter_event_rest,
1241 gc_enter_event_finalizer,
1242 gc_enter_event_rb_memerror,
static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
static void shrink_stack_chunk_cache(mark_stack_t *stack);
static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1302#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1303 if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
1304 *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
1308#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
1310#define gc_prof_record(objspace) (objspace)->profile.current_record
1311#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1313#ifdef HAVE_VA_ARGS_MACRO
1314# define gc_report(level, objspace, ...) \
1315 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1317# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
static const char *obj_info(VALUE obj);
static const char *obj_type_name(VALUE obj);
1341#if defined(__GNUC__) && defined(__i386__)
1342typedef unsigned long long tick_t;
1343#define PRItick "llu"
1347 unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
1352#elif defined(__GNUC__) && defined(__x86_64__)
1353typedef unsigned long long tick_t;
1354#define PRItick "llu"
1356static __inline__ tick_t
1359 unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1364#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1365typedef unsigned long long tick_t;
1366#define PRItick "llu"
1368static __inline__ tick_t
1371 unsigned long long val = __builtin_ppc_get_timebase();
1378#elif defined(__POWERPC__) && defined(__APPLE__)
1379typedef unsigned long long tick_t;
1380#define PRItick "llu"
1382static __inline__ tick_t
1385 unsigned long int upper, lower, tmp;
1386 # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1387 # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
    } while (tmp != upper);
1393 return ((tick_t)upper << 32) | lower;
1396#elif defined(__aarch64__) && defined(__GNUC__)
1397typedef unsigned long tick_t;
1400static __inline__ tick_t
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1409#elif defined(_WIN32) && defined(_MSC_VER)
1411typedef unsigned __int64 tick_t;
1412#define PRItick "llu"
1421typedef clock_t tick_t;
1422#define PRItick "llu"
1432typedef double tick_t;
1433#define PRItick "4.9f"
1438 return getrusage_time();
1441#error "choose tick type"
1444#define MEASURE_LINE(expr) do { \
1445 volatile tick_t start_time = tick(); \
1446 volatile tick_t end_time; \
1448 end_time = tick(); \
1449 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1453#define MEASURE_LINE(expr) expr
asan_unpoison_object_temporary(VALUE obj)
    void *ptr = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);
asan_poison_object_restore(VALUE obj, void *ptr)
    asan_poison_object(obj);
1473#define asan_unpoisoning_object(obj) \
1474 for (void *poisoned = asan_unpoison_object_temporary(obj), \
1475 *unpoisoning = &poisoned; \
1477 unpoisoning = asan_poison_object_restore(obj, poisoned))
1479#define FL_CHECK2(name, x, pred) \
1480 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1481 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1482#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1483#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1484#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1486#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1487#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1488#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1490#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1491#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1492#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1494#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1495#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1496#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1498#define RVALUE_OLD_AGE 3
1499#define RVALUE_AGE_SHIFT 5
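/*
 * Generational bookkeeping: every object keeps a small age counter in its
 * flags starting at bit RVALUE_AGE_SHIFT. The age grows as the object
 * survives marking, and once it reaches RVALUE_OLD_AGE the object is
 * promoted to the old generation via RVALUE_OLD_UNCOLLECTIBLE_SET and the
 * per-page uncollectible bitmap.
 */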
check_rvalue_consistency_force(const VALUE obj, int terminate)
    RB_VM_LOCK_ENTER_NO_BARRIER();
        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
                if (page->start <= (uintptr_t)obj &&
                    (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
                    fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                            (void *)obj, (void *)page);
        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1547 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1548 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1549 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1550 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
        if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
        obj_memsize_of((VALUE)obj, FALSE);
        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
        if (!is_full_marking(objspace)) {
            if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                        obj_info(obj), age);
            if (remembered_bit && age != RVALUE_OLD_AGE) {
                fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                        obj_info(obj), age);
        if (is_incremental_marking(objspace) && marking_bit) {
            if (!is_marking(objspace) && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
    RB_VM_LOCK_LEAVE_NO_BARRIER();
    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there is %d errors.", err);
1618#if RGENGC_CHECK_MODE == 0
check_rvalue_consistency(const VALUE obj)
check_rvalue_consistency(const VALUE obj)
    check_rvalue_consistency_force(obj, TRUE);
1640 void *poisoned = asan_unpoison_object_temporary(obj);
1646 asan_poison_object(obj);
RVALUE_MARKED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARK_BITMAP(obj) != 0;
RVALUE_PINNED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_PIN_BITMAP(obj) != 0;
RVALUE_WB_UNPROTECTED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
RVALUE_MARKING(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
RVALUE_REMEMBERED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
RVALUE_UNCOLLECTIBLE(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
RVALUE_OLD_P_RAW(VALUE obj)
    return (RBASIC(obj)->flags & promoted) == promoted;
RVALUE_OLD_P(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
1708#if RGENGC_CHECK_MODE || GC_DEBUG
RVALUE_AGE(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1720 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1721 objspace->rgengc.old_objects++;
1722 rb_transient_heap_promote(obj);
1724#if RGENGC_PROFILE >= 2
1725 objspace->profile.total_promoted_count++;
1733 RB_DEBUG_COUNTER_INC(obj_promote);
1734 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1741 flags |= (age << RVALUE_AGE_SHIFT);
1750 int age = RVALUE_FLAGS_AGE(flags);
1752 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1757 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1759 if (age == RVALUE_OLD_AGE) {
1760 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1762 check_rvalue_consistency(obj);
1769 check_rvalue_consistency(obj);
1770 GC_ASSERT(!RVALUE_OLD_P(obj));
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1773 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1775 check_rvalue_consistency(obj);
1782 check_rvalue_consistency(obj);
1783 GC_ASSERT(!RVALUE_OLD_P(obj));
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1787 check_rvalue_consistency(obj);
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1794 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1800 check_rvalue_consistency(obj);
1801 GC_ASSERT(RVALUE_OLD_P(obj));
1803 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1804 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1807 RVALUE_DEMOTE_RAW(objspace, obj);
1809 if (RVALUE_MARKED(obj)) {
1810 objspace->rgengc.old_objects--;
1813 check_rvalue_consistency(obj);
RVALUE_AGE_RESET_RAW(VALUE obj)
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
RVALUE_AGE_RESET(VALUE obj)
1825 check_rvalue_consistency(obj);
1826 GC_ASSERT(!RVALUE_OLD_P(obj));
1828 RVALUE_AGE_RESET_RAW(obj);
1829 check_rvalue_consistency(obj);
RVALUE_BLACK_P(VALUE obj)
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
RVALUE_GREY_P(VALUE obj)
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
RVALUE_WHITE_P(VALUE obj)
    return RVALUE_MARKED(obj) == FALSE;
1859 return calloc(1, n);
1863rb_objspace_alloc(
void)
1866 objspace->flags.measure_gc = 1;
1867 malloc_limit = gc_params.malloc_limit_min;
1869 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
1872 size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
1874 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1875 ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
    if (is_lazy_sweeping(objspace))
        rb_bug("lazy sweeping underway when freeing object space");
1893 if (objspace->profile.records) {
1894 free(objspace->profile.records);
1895 objspace->profile.records = 0;
1900 for (list = global_list; list; list = next) {
1905 if (heap_pages_sorted) {
1907 for (i = 0; i < heap_allocated_pages; ++i) {
1908 heap_page_free(objspace, heap_pages_sorted[i]);
1910 free(heap_pages_sorted);
1911 heap_allocated_pages = 0;
1912 heap_pages_sorted_length = 0;
1913 heap_pages_lomem = 0;
1914 heap_pages_himem = 0;
1916 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
1918 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1919 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1922 st_free_table(objspace->id_to_obj_tbl);
1923 st_free_table(objspace->obj_to_id_tbl);
1925 free_stack_chunks(&objspace->mark_stack);
1926 mark_stack_free_cache(&objspace->mark_stack);
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1940 if (heap_pages_sorted_length > 0) {
1941 sorted = (
struct heap_page **)realloc(heap_pages_sorted, size);
1942 if (sorted) heap_pages_sorted = sorted;
1945 sorted = heap_pages_sorted = (
struct heap_page **)malloc(size);
1952 heap_pages_sorted_length = next_length;
1963 size_t next_length = heap_allocatable_pages(objspace);
1964 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
1966 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
1967 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
1970 if (next_length > heap_pages_sorted_length) {
1971 heap_pages_expand_sorted_to(objspace, next_length);
1974 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
1975 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1981 size_pool->allocatable_pages = s;
1982 heap_pages_expand_sorted(objspace);
1988 ASSERT_vm_locking();
1992 asan_unpoison_object(obj,
false);
1994 asan_unlock_freelist(page);
1996 p->as.free.flags = 0;
1997 p->as.free.next = page->freelist;
1999 asan_lock_freelist(page);
    if (RGENGC_CHECK_MODE &&
        !(page->start <= (uintptr_t)obj &&
          (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
          obj % BASE_SLOT_SIZE == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
2009 asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
2016 asan_unlock_freelist(page);
2017 GC_ASSERT(page->free_slots != 0);
2018 GC_ASSERT(page->freelist != NULL);
2020 page->free_next = heap->free_pages;
2021 heap->free_pages = page;
2023 RUBY_DEBUG_LOG(
"page:%p freelist:%p", (
void *)page, (
void *)page->freelist);
2025 asan_lock_freelist(page);
2028#if GC_ENABLE_INCREMENTAL_MARK
2032 asan_unlock_freelist(page);
2033 GC_ASSERT(page->free_slots != 0);
2034 GC_ASSERT(page->freelist != NULL);
2036 page->free_next = heap->pooled_pages;
2037 heap->pooled_pages = page;
2038 objspace->rincgc.pooled_slots += page->free_slots;
2040 asan_lock_freelist(page);
2047 ccan_list_del(&page->page_node);
2048 heap->total_pages--;
2049 heap->total_slots -= page->total_slots;
2052static void rb_aligned_free(
void *ptr,
size_t size);
2057 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2059 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2061 GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
        if (munmap(page_body, HEAP_PAGE_SIZE)) {
            rb_bug("heap_page_body_free: munmap failed");
2068 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
2075 heap_allocated_pages--;
2076 page->size_pool->total_freed_pages++;
2077 heap_page_body_free(GET_PAGE_BODY(page->start));
2086 bool has_pages_in_tomb_heap = FALSE;
2087 for (i = 0; i < SIZE_POOL_COUNT; i++) {
2088 if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
2089 has_pages_in_tomb_heap = TRUE;
2094 if (has_pages_in_tomb_heap) {
2095 for (i = j = 1; j < heap_allocated_pages; i++) {
2096 struct heap_page *page = heap_pages_sorted[i];
2098 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
2099 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
2100 heap_page_free(objspace, page);
2104 heap_pages_sorted[j] = page;
2110 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
2111 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
2112 GC_ASSERT(himem <= heap_pages_himem);
2113 heap_pages_himem = himem;
2115 GC_ASSERT(j == heap_allocated_pages);
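/*
 * heap_page_body_allocate below must return HEAP_PAGE_SIZE bytes aligned to
 * HEAP_PAGE_ALIGN. mmap has no aligned-allocation primitive, so it maps
 * HEAP_PAGE_ALIGN extra bytes, rounds the result up to the next alignment
 * boundary, and munmaps the unused head and tail of the mapping; without
 * mmap it falls back to rb_aligned_malloc.
 */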
2120heap_page_body_allocate(
void)
2124 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2126 GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
2128 char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
2129 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2130 if (ptr == MAP_FAILED) {
2134 char *aligned = ptr + HEAP_PAGE_ALIGN;
2135 aligned -= ((
VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
2136 GC_ASSERT(aligned > ptr);
2137 GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
2139 size_t start_out_of_range_size = aligned - ptr;
2140 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2141 if (start_out_of_range_size > 0) {
2142 if (munmap(ptr, start_out_of_range_size)) {
2143 rb_bug(
"heap_page_body_allocate: munmap failed for start");
2147 size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
2148 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2149 if (end_out_of_range_size > 0) {
2150 if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
2151 rb_bug(
"heap_page_body_allocate: munmap failed for end");
2159 page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
2162 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2170 uintptr_t start, end, p;
2172 uintptr_t hi, lo, mid;
2173 size_t stride = size_pool->slot_size;
2174 unsigned int limit = (
unsigned int)((HEAP_PAGE_SIZE -
sizeof(
struct heap_page_header)))/(
int)stride;
2178 if (page_body == 0) {
2183 page = calloc1(
sizeof(
struct heap_page));
2185 heap_page_body_free(page_body);
2192 if (start % BASE_SLOT_SIZE != 0) {
2193 int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
2194 start = start + delta;
2195 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
2201 if (NUM_IN_PAGE(start) == 1) {
2202 start += stride - BASE_SLOT_SIZE;
2205 GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);
2207 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2209 end = start + (limit * (int)stride);
2213 hi = (uintptr_t)heap_allocated_pages;
2217 mid = (lo + hi) / 2;
2218 mid_page = heap_pages_sorted[mid];
2219 if ((uintptr_t)mid_page->start < start) {
2222 else if ((uintptr_t)mid_page->start > start) {
            rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2230 if (hi < (uintptr_t)heap_allocated_pages) {
2231 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi],
struct heap_page_header*, heap_allocated_pages - hi);
2234 heap_pages_sorted[hi] = page;
2236 heap_allocated_pages++;
2238 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2239 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2240 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2242 size_pool->total_allocated_pages++;
2244 if (heap_allocated_pages > heap_pages_sorted_length) {
2245 rb_bug(
"heap_page_allocate: allocated(%"PRIdSIZE
") > sorted(%"PRIdSIZE
")",
2246 heap_allocated_pages, heap_pages_sorted_length);
2249 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
2250 if (heap_pages_himem < end) heap_pages_himem = end;
2252 page->start = start;
2253 page->total_slots = limit;
2254 page->slot_size = size_pool->slot_size;
2255 page->size_pool = size_pool;
2256 page_body->header.page = page;
    for (p = start; p != end; p += stride) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
2262 page->free_slots = limit;
2264 asan_lock_freelist(page);
2273 ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2274 asan_unlock_freelist(page);
2275 if (page->freelist != NULL) {
2276 heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2277 asan_lock_freelist(page);
2289 const char *method =
"recycle";
2291 size_pool->allocatable_pages--;
2293 page = heap_page_resurrect(objspace, size_pool);
2296 page = heap_page_allocate(objspace, size_pool);
2297 method =
"allocate";
    if (0) fprintf(stderr, "heap_page_create: %s - %p, "
                   "heap_allocated_pages: %"PRIdSIZE", "
                   "heap_allocated_pages: %"PRIdSIZE", "
                   "tomb->total_pages: %"PRIdSIZE"\n",
                   method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2311 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2312 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2313 ccan_list_add_tail(&heap->pages, &page->page_node);
2314 heap->total_pages++;
2315 heap->total_slots += page->total_slots;
2321 struct heap_page *page = heap_page_create(objspace, size_pool);
2322 heap_add_page(objspace, size_pool, heap, page);
2323 heap_add_freepage(heap, page);
2331 size_pool_allocatable_pages_set(objspace, size_pool, add);
2333 for (i = 0; i < add; i++) {
2334 heap_assign_page(objspace, size_pool, heap);
2337 GC_ASSERT(size_pool->allocatable_pages == 0);
2343 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2346 if (goal_ratio == 0.0) {
2347 next_used = (size_t)(used * gc_params.growth_factor);
2349 else if (total_slots == 0) {
2350 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
2351 next_used = (gc_params.heap_init_slots * multiple) / HEAP_PAGE_OBJ_LIMIT;
2357 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2359 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2360 if (f < 1.0) f = 1.1;
2362 next_used = (size_t)(f * used);
2366 "free_slots(%8"PRIuSIZE
")/total_slots(%8"PRIuSIZE
")=%1.2f,"
2367 " G(%1.2f), f(%1.2f),"
2368 " used(%8"PRIuSIZE
") => next_used(%8"PRIuSIZE
")\n",
2369 free_slots, total_slots, free_slots/(
double)total_slots,
2370 goal_ratio, f, used, next_used);
2374 if (gc_params.growth_max_slots > 0) {
2375 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2376 if (next_used > max_used) next_used = max_used;
2379 size_t extend_page_count = next_used - used;
2381 if (extend_page_count == 0) extend_page_count = 1;
2383 return extend_page_count;
2389 if (size_pool->allocatable_pages > 0) {
        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
                  "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
                  heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2394 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2395 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2397 heap_assign_page(objspace, size_pool, heap);
2407 if (heap->free_pages == NULL && is_incremental_marking(objspace)) {
2408 gc_marks_continue(objspace, size_pool, heap);
2413 if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2414 gc_sweep_continue(objspace, size_pool, heap);
2421 GC_ASSERT(heap->free_pages == NULL);
2424 gc_continue(objspace, size_pool, heap);
2428 if (heap->free_pages == NULL &&
2429 (will_be_incremental_marking(objspace) ||
2430 (heap_increment(objspace, size_pool, heap) == FALSE))) {
2431 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2436 gc_continue(objspace, size_pool, heap);
2441 if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
2442 if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
2443 rb_bug(
"cannot create a new page after GC");
2446 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2451 gc_continue(objspace, size_pool, heap);
        if (heap->free_pages == NULL &&
            !heap_increment(objspace, size_pool, heap)) {
            rb_bug("cannot create a new page after major GC");
2463 GC_ASSERT(heap->free_pages != NULL);
2471 objspace->flags.has_hook = (objspace->hook_events != 0);
2477 if (UNLIKELY(!ec->cfp))
return;
2478 const VALUE *pc = ec->cfp->pc;
2479 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2483 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2487#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2488#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2490#define gc_event_hook_prep(objspace, event, data, prep) do { \
2491 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2493 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2497#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
2502#if !__has_feature(memory_sanitizer)
2507 p->as.basic.
flags = flags;
2510#if RACTOR_CHECK_MODE
2511 rb_ractor_setup_belonging(obj);
2514#if RGENGC_CHECK_MODE
2515 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2517 RB_VM_LOCK_ENTER_NO_BARRIER();
2519 check_rvalue_consistency(obj);
2521 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2522 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2523 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2524 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
    if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
    if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
    if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2534 RB_VM_LOCK_LEAVE_NO_BARRIER();
2537 if (UNLIKELY(wb_protected == FALSE)) {
2538 ASSERT_vm_locking();
2539 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2543 objspace->total_allocated_objects++;
2547 objspace->profile.total_generated_normal_object_count++;
2548#if RGENGC_PROFILE >= 2
2549 objspace->profile.generated_normal_object_count_types[
BUILTIN_TYPE(obj)]++;
2553 objspace->profile.total_generated_shady_object_count++;
2554#if RGENGC_PROFILE >= 2
2555 objspace->profile.generated_shady_object_count_types[
BUILTIN_TYPE(obj)]++;
2561 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2565 gc_report(5, objspace,
"newobj: %s\n", obj_info(obj));
2567#if RGENGC_OLD_NEWOBJ_CHECK > 0
2569 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2571 if (!is_incremental_marking(objspace) &&
2574 if (--newobj_cnt == 0) {
2575 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2577 gc_mark_set(objspace, obj);
2578 RVALUE_AGE_SET_OLD(objspace, obj);
2580 rb_gc_writebarrier_remember(obj);
2590rb_gc_obj_slot_size(
VALUE obj)
2592 return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2596size_pool_slot_size(
unsigned char pool_id)
2598 GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2600 size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
2602#if RGENGC_CHECK_MODE
2604 GC_ASSERT(size_pools[pool_id].slot_size == (
short)slot_size);
2607 slot_size -= RVALUE_OVERHEAD;
2613rb_size_pool_slot_size(
unsigned char pool_id)
2615 return size_pool_slot_size(pool_id);
2619rb_gc_size_allocatable_p(
size_t size)
2621 return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
2626 size_t size_pool_idx)
2629 RVALUE *p = size_pool_cache->freelist;
2631#if GC_ENABLE_INCREMENTAL_MARK
2632 if (is_incremental_marking(objspace)) {
2634 if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2639 cache->incremental_mark_step_allocated_slots++;
2646 MAYBE_UNUSED(
const size_t) stride = size_pool_slot_size(size_pool_idx);
2647 size_pool_cache->freelist = p->as.free.next;
2649 asan_unpoison_memory_region(p, stride,
true);
2651 asan_unpoison_object(obj,
true);
2653#if RGENGC_CHECK_MODE
2654 GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
2656 MEMZERO((
char *)obj,
char, stride);
2668 ASSERT_vm_locking();
2672 if (heap->free_pages == NULL) {
2673 heap_prepare(objspace, size_pool, heap);
2676 page = heap->free_pages;
2677 heap->free_pages = page->free_next;
2679 GC_ASSERT(page->free_slots != 0);
2680 RUBY_DEBUG_LOG(
"page:%p freelist:%p cnt:%d", (
void *)page, (
void *)page->freelist, page->free_slots);
2682 asan_unlock_freelist(page);
2691 gc_report(3, &
rb_objspace,
"ractor_set_cache: Using page %p\n", (
void *)GET_PAGE_BODY(page->start));
2695 GC_ASSERT(size_pool_cache->freelist == NULL);
2696 GC_ASSERT(page->free_slots != 0);
2697 GC_ASSERT(page->freelist != NULL);
2699 size_pool_cache->using_page = page;
2700 size_pool_cache->freelist = page->freelist;
2701 page->free_slots = 0;
2702 page->freelist = NULL;
2704 asan_unpoison_object((
VALUE)size_pool_cache->freelist,
false);
2706 asan_poison_object((
VALUE)size_pool_cache->freelist);
2713 p->as.values.v1 = v1;
2714 p->as.values.v2 = v2;
2715 p->as.values.v3 = v3;
2720size_pool_idx_for_size(
size_t size)
2723 size += RVALUE_OVERHEAD;
2725 size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
2728 size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
2730 if (size_pool_idx >= SIZE_POOL_COUNT) {
2731 rb_bug(
"size_pool_idx_for_size: allocation size too large");
2734#if RGENGC_CHECK_MODE
    GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
    if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
    return size_pool_idx;
    GC_ASSERT(size <= sizeof(RVALUE));
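/*
 * size_pool_idx_for_size maps a requested byte size to the smallest size
 * pool able to hold it: slot_count = CEILDIV(size, BASE_SLOT_SIZE) and the
 * pool index is ceil(log2(slot_count)), computed as
 * 64 - nlz_int64(slot_count - 1). For example, with a 40-byte BASE_SLOT_SIZE
 * a 100-byte request needs 3 base slots and lands in pool 2, whose slots are
 * 4 * BASE_SLOT_SIZE bytes.
 */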
2751 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
2754 VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
    if (UNLIKELY(obj == Qfalse)) {
        bool unlock_vm = false;
2761 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2767 ASSERT_vm_locking();
2769#if GC_ENABLE_INCREMENTAL_MARK
2770 if (is_incremental_marking(objspace)) {
2771 gc_marks_continue(objspace, size_pool, heap);
2772 cache->incremental_mark_step_allocated_slots = 0;
2775 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2781 struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
2782 ractor_cache_set_page(cache, size_pool_idx, page);
2785 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2787 GC_ASSERT(obj !=
Qfalse);
2792 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
newobj_zero_slot(VALUE obj)
    memset((char *)obj + sizeof(struct RBasic), 0, rb_gc_obj_slot_size(obj) - sizeof(struct RBasic));
2813 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2815 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2819 rb_bug(
"object allocation during garbage collection phase");
2822 if (ruby_gc_stressful) {
2823 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2829 obj = newobj_alloc(objspace, cr, size_pool_idx,
true);
2830#if SHAPE_IN_BASIC_FLAGS
2831 flags |= (
VALUE)(size_pool_idx) << SHAPE_FLAG_SHIFT;
2833 newobj_init(klass, flags, wb_protected, objspace, obj);
2837 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2842NOINLINE(
static VALUE newobj_slowpath_wb_protected(
VALUE klass,
VALUE flags,
2844NOINLINE(
static VALUE newobj_slowpath_wb_unprotected(
VALUE klass,
VALUE flags,
2850 return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2856 return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2865 RB_DEBUG_COUNTER_INC(obj_newobj);
2866 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2868#if GC_DEBUG_STRESS_TO_CLASS
2869 if (UNLIKELY(stress_to_class)) {
2871 for (i = 0; i < cnt; ++i) {
2872 if (klass ==
RARRAY_AREF(stress_to_class, i)) rb_memerror();
2877 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2879 if (!UNLIKELY(during_gc ||
2880 ruby_gc_stressful ||
2881 gc_event_hook_available_p(objspace)) &&
2883 obj = newobj_alloc(objspace, cr, size_pool_idx,
false);
2884#if SHAPE_IN_BASIC_FLAGS
2885 flags |= (
VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
2887 newobj_init(klass, flags, wb_protected, objspace, obj);
2890 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2892 obj = wb_protected ?
2893 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2894 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2903 VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
2904 return newobj_fill(obj, v1, v2, v3);
2910 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2911 return newobj_fill(obj, v1, v2, v3);
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
    return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
    return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
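/*
 * The wb_protected flag passed down to newobj_of records whether the new
 * object's users promise to emit write barriers. WB-unprotected objects are
 * flagged in the wb_unprotected bitmap, never age into the old generation,
 * and have to be treated conservatively by the generational collector, which
 * is why code that cannot emit write barriers allocates through
 * rb_wb_unprotected_newobj_of.
 */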
2932 return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2940 return newobj_of(0,
T_NONE, 0, 0, 0, FALSE, RVALUE_SIZE);
2944rb_obj_embedded_size(uint32_t numiv)
rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
2953 GC_ASSERT(flags & ROBJECT_EMBED);
2957 uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
2959 size = rb_obj_embedded_size(index_tbl_num_entries);
2960 if (!rb_gc_size_allocatable_p(size)) {
2961 size =
sizeof(
struct RObject);
2964 size =
sizeof(
struct RObject);
2967 VALUE obj = newobj_of(klass, flags, 0, 0, 0, wb_protected, size);
2969 rb_shape_get_shape(obj)->
type == SHAPE_INITIAL_CAPACITY);
2973 ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
2978 for (
size_t i = 0; i < ROBJECT_IV_CAPACITY(obj); i++) {
2997#define UNEXPECTED_NODE(func) \
2998 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2999 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
3002rb_imemo_name(
enum imemo_type
type)
3006#define IMEMO_NAME(x) case imemo_##x: return #x;
3010 IMEMO_NAME(throw_data);
3017 IMEMO_NAME(parser_strterm);
3018 IMEMO_NAME(callinfo);
3019 IMEMO_NAME(callcache);
3020 IMEMO_NAME(constcache);
3031 size_t size = RVALUE_SIZE;
3033 return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
3041 return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
    return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
imemo_memsize(VALUE obj)
    switch (imemo_type(obj)) {
        size += sizeof(RANY(obj)->as.imemo.ment.def);
        size += rb_iseq_memsize((rb_iseq_t *)obj);
        size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
        size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
        size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
3078 case imemo_throw_data:
3081 case imemo_parser_strterm:
3094 VALUE memo = rb_imemo_new(
type, v1, v2, v3, v0);
3095 fprintf(stderr,
"memo %p (type: %d) @ %s:%d\n", (
void *)memo, imemo_type(memo), file, line);
3100MJIT_FUNC_EXPORTED
VALUE
3101rb_class_allocate_instance(
VALUE klass)
rb_data_object_check(VALUE klass)
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
3119 if (klass) rb_data_object_check(klass);
3126 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
3134 RBIMPL_NONNULL_ARG(type);
3135 if (klass) rb_data_object_check(klass);
3142 VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
3148 rb_objspace_data_type_memsize(VALUE obj)
3153 if (ptr && type->function.dsize) {
3154 return type->function.dsize(ptr);
3161 rb_objspace_data_type_name(VALUE obj)
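/* Heap page lookup: heap_pages_sorted is kept ordered by page body address,
 * so a raw pointer can be mapped to its heap_page with a bsearch over that
 * array (see heap_page_for_ptr / is_pointer_to_heap below). */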
3172 ptr_in_page_body_p(const void *ptr, const void *memb)
3175 uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);
3177 if ((uintptr_t)ptr >= p_body) {
3178 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
3191 if (ptr < (uintptr_t)heap_pages_lomem ||
3192 ptr > (uintptr_t)heap_pages_himem) {
3196 res = bsearch((void *)ptr, heap_pages_sorted,
3197 (size_t)heap_allocated_pages, sizeof(struct heap_page *),
3198 ptr_in_page_body_p);
3208 PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
3212 register uintptr_t p = (uintptr_t)ptr;
3215 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
3217 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
3218 RB_DEBUG_COUNTER_INC(gc_isptr_range);
3220 if (p % BASE_SLOT_SIZE != 0) return FALSE;
3221 RB_DEBUG_COUNTER_INC(gc_isptr_align);
3223 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
3225 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
3226 if (page->flags.in_tomb) {
3230 if (p < page->start) return FALSE;
3231 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
3232 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
3240 static enum rb_id_table_iterator_result
3241 free_const_entry_i(VALUE value, void *data)
3245 return ID_TABLE_CONTINUE;
3251 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
3252 rb_id_table_free(tbl);
3261 for (int i=0; i<ccs->len; i++) {
3264 void *ptr = asan_unpoison_object_temporary((VALUE)cc);
3266 if (is_pointer_to_heap(objspace, (void *)cc) &&
3267 IMEMO_TYPE_P(cc, imemo_callcache) &&
3268 cc->klass == klass) {
3273 asan_poison_object((VALUE)cc);
3278 asan_poison_object((VALUE)cc);
3281 vm_cc_invalidate(cc);
3283 ruby_xfree(ccs->entries);
3291 RB_DEBUG_COUNTER_INC(ccs_free);
3292 vm_ccs_free(ccs, TRUE, NULL, Qundef);
3301 static enum rb_id_table_iterator_result
3302 cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
3306 VM_ASSERT(vm_ccs_p(ccs));
3307 VM_ASSERT(id == ccs->cme->called_id);
3309 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
3310 rb_vm_ccs_free(ccs);
3311 return ID_TABLE_DELETE;
3314 gc_mark(data->objspace, (VALUE)ccs->cme);
3316 for (int i=0; i<ccs->len; i++) {
3317 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
3318 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
3320 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
3321 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
3323 return ID_TABLE_CONTINUE;
3330 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3333 .objspace = objspace,
3336 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
3340 static enum rb_id_table_iterator_result
3341 cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
3345 VM_ASSERT(vm_ccs_p(ccs));
3346 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
3347 return ID_TABLE_CONTINUE;
3353 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3357 .objspace = objspace,
3361 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
3362 rb_id_table_free(cc_tbl);
3366 static enum rb_id_table_iterator_result
3367 cvar_table_free_i(VALUE value, void * ctx)
3369 xfree((void *) value);
3370 return ID_TABLE_CONTINUE;
3374 rb_cc_table_free(VALUE klass)
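/* Objects whose dfree cannot run inside the GC (RUBY_TYPED_FREE_IMMEDIATELY
 * not set, or an IO that needs fptr finalization) are turned into T_ZOMBIE
 * slots and chained onto heap_pages_deferred_final; finalize_list() later
 * runs dfree and returns the slot to its page's freelist. */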
3382 struct RZombie *zombie = RZOMBIE(obj);
3384 zombie->dfree = dfree;
3385 zombie->data = data;
3386 VALUE prev, next = heap_pages_deferred_final;
3388 zombie->next = prev = next;
3390 } while (next != prev);
3392 struct heap_page *page = GET_HEAP_PAGE(obj);
3393 page->final_slots++;
3394 heap_pages_final_slots++;
3400 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3401 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3407 ASSERT_vm_locking();
3408 st_data_t o = (st_data_t)obj, id;
3413 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3415 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3418 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
3425 RB_DEBUG_COUNTER_INC(obj_free);
3435 rb_bug("obj_free() called for broken object");
3447 obj_free_object_id(objspace, obj);
3450 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3452#if RGENGC_CHECK_MODE
3453#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3454 CHECK(RVALUE_WB_UNPROTECTED);
3455 CHECK(RVALUE_MARKED);
3456 CHECK(RVALUE_MARKING);
3457 CHECK(RVALUE_UNCOLLECTIBLE);
3463 if (rb_shape_obj_too_complex(obj)) {
3464 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
3465 st_free_table(ROBJECT_IV_HASH(obj));
3467 else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3468 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3470 else if (ROBJ_TRANSIENT_P(obj)) {
3471 RB_DEBUG_COUNTER_INC(obj_obj_transient);
3474 xfree(RANY(obj)->as.object.as.heap.ivptr);
3475 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3480 rb_id_table_free(RCLASS_M_TBL(obj));
3481 cc_table_free(objspace, obj, FALSE);
3482 if (RCLASS_IVPTR(obj)) {
3483 xfree(RCLASS_IVPTR(obj));
3485 if (RCLASS_CONST_TBL(obj)) {
3486 rb_free_const_table(RCLASS_CONST_TBL(obj));
3488 if (RCLASS_CVC_TBL(obj)) {
3489 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3490 rb_id_table_free(RCLASS_CVC_TBL(obj));
3492 rb_class_remove_subclass_head(obj);
3493 rb_class_remove_from_module_subclasses(obj);
3494 rb_class_remove_from_super_subclasses(obj);
3495 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3496 xfree(RCLASS_SUPERCLASSES(obj));
3499#if SIZE_POOL_COUNT == 1
3500 if (RCLASS_EXT(obj))
3501 xfree(RCLASS_EXT(obj));
3514#if USE_DEBUG_COUNTER
3517 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3520 RB_DEBUG_COUNTER_INC(obj_hash_1);
3523 RB_DEBUG_COUNTER_INC(obj_hash_2);
3526 RB_DEBUG_COUNTER_INC(obj_hash_3);
3529 RB_DEBUG_COUNTER_INC(obj_hash_4);
3535 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3539 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3542 if (RHASH_AR_TABLE_P(obj)) {
3543 if (RHASH_AR_TABLE(obj) == NULL) {
3544 RB_DEBUG_COUNTER_INC(obj_hash_null);
3547 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3551 RB_DEBUG_COUNTER_INC(obj_hash_st);
3558 if (RHASH_TRANSIENT_P(obj)) {
3559 RB_DEBUG_COUNTER_INC(obj_hash_transient);
3567 GC_ASSERT(RHASH_ST_TABLE_P(obj));
3568 st_free_table(RHASH(obj)->as.st);
3572 if (RANY(obj)->as.regexp.ptr) {
3573 onig_free(RANY(obj)->as.regexp.ptr);
3574 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3579 int free_immediately = FALSE;
3580 void (*dfree)(void *);
3584 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3585 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3586 if (0 && free_immediately == 0) {
3588 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
3592 dfree = RANY(obj)->as.data.dfree;
3598 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3600 else if (free_immediately) {
3602 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3605 make_zombie(objspace, obj, dfree, data);
3606 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3611 RB_DEBUG_COUNTER_INC(obj_data_empty);
3616 if (RANY(obj)->as.match.rmatch) {
3617 struct rmatch *rm = RANY(obj)->as.match.rmatch;
3618#if USE_DEBUG_COUNTER
3619 if (rm->regs.num_regs >= 8) {
3620 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3622 else if (rm->regs.num_regs >= 4) {
3623 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3625 else if (rm->regs.num_regs >= 1) {
3626 RB_DEBUG_COUNTER_INC(obj_match_under4);
3629 onig_region_free(&rm->regs, 0);
3634 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3638 if (RANY(obj)->as.file.fptr) {
3639 make_io_zombie(objspace, obj);
3640 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3645 RB_DEBUG_COUNTER_INC(obj_rational);
3648 RB_DEBUG_COUNTER_INC(obj_complex);
3654 if (RICLASS_OWNS_M_TBL_P(obj)) {
3656 rb_id_table_free(RCLASS_M_TBL(obj));
3658 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3659 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3661 rb_class_remove_subclass_head(obj);
3662 cc_table_free(objspace, obj, FALSE);
3663 rb_class_remove_from_module_subclasses(obj);
3664 rb_class_remove_from_super_subclasses(obj);
3665#if !RCLASS_EXT_EMBEDDED
3666 xfree(RCLASS_EXT(obj));
3669 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3673 RB_DEBUG_COUNTER_INC(obj_float);
3677 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3678 xfree(BIGNUM_DIGITS(obj));
3679 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3682 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3687 UNEXPECTED_NODE(obj_free);
3691 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3692 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3693 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3695 else if (RSTRUCT_TRANSIENT_P(obj)) {
3696 RB_DEBUG_COUNTER_INC(obj_struct_transient);
3699 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3700 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3706 rb_gc_free_dsymbol(obj);
3707 RB_DEBUG_COUNTER_INC(obj_symbol);
3712 switch (imemo_type(obj)) {
3714 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3715 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3718 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3719 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3722 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3724 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3727 xfree(RANY(obj)->as.imemo.alloc.ptr);
3728 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3731 rb_ast_free(&RANY(obj)->as.imemo.ast);
3732 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3735 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3738 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3740 case imemo_throw_data:
3741 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3744 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3747 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3749 case imemo_parser_strterm:
3750 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3752 case imemo_callinfo:
3753 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3755 case imemo_callcache:
3756 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3758 case imemo_constcache:
3759 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3765 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3770 make_zombie(objspace, obj, 0, 0);
3779#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3780#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
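/* Object IDs are lazily assigned, monotonically increasing integers.  Two st
 * tables keep the mapping in both directions (obj_to_id_tbl / id_to_obj_tbl)
 * so object_id stays stable and ObjectSpace._id2ref keeps working even when
 * compaction moves the object. */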
3783object_id_cmp(st_data_t x, st_data_t y)
3785 if (RB_BIGNUM_TYPE_P(x)) {
3786 return !rb_big_eql(x, y);
3794object_id_hash(st_data_t n)
3796 if (RB_BIGNUM_TYPE_P(n)) {
3800 return st_numhash(n);
3803static const struct st_hash_type object_id_hash_type = {
3813#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
3815 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
3818 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3819 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3820 objspace->obj_to_id_tbl = st_init_numtable();
3822#if RGENGC_ESTIMATE_OLDMALLOC
3823 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3826 heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
3829 for (int i = 1; i < SIZE_POOL_COUNT; i++) {
3831 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
3832 size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
3834 heap_pages_expand_sorted(objspace);
3836 init_mark_stack(&objspace->mark_stack);
3838 objspace->profile.invoke_time = getrusage_time();
3839 finalizer_table = st_init_numtable();
3847 gc_stress_set(objspace, ruby_initial_gc_stress);
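/* objspace_each_objects() walks every eden page and hands the callback one
 * contiguous range per page: [vstart, vend) plus the slot stride, so callers
 * can iterate slots of any size pool without knowing the page layout. */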
3850 typedef int each_obj_callback(void *, void *, size_t, void *);
3852 static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3853 static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3857 bool reenable_incremental;
3859 each_obj_callback *callback;
3862 struct heap_page **pages[SIZE_POOL_COUNT];
3863 size_t pages_counts[SIZE_POOL_COUNT];
3867 objspace_each_objects_ensure(VALUE arg)
3873 if (data->reenable_incremental) {
3874 objspace->flags.dont_incremental = FALSE;
3877 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3878 struct heap_page **pages = data->pages[i];
3890 objspace_each_objects_try(VALUE arg)
3896 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3898 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3900 struct heap_page **pages = malloc(size);
3901 if (!pages) rb_memerror();
3909 size_t pages_count = 0;
3910 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3911 pages[pages_count] = page;
3914 data->pages[i] = pages;
3915 data->pages_counts[i] = pages_count;
3916 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3919 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3921 size_t pages_count = data->pages_counts[i];
3922 struct heap_page **pages = data->pages[i];
3924 struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3925 for (size_t i = 0; i < pages_count; i++) {
3928 if (page == NULL) break;
3932 if (pages[i] != page) continue;
3934 uintptr_t pstart = (uintptr_t)page->start;
3935 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3937 if (!__asan_region_is_poisoned((void *)pstart, pend - pstart) &&
3938 (*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3942 page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3986 rb_objspace_each_objects(each_obj_callback *callback, void *data)
3988 objspace_each_objects(&rb_objspace, callback, data, TRUE);
3992 objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
3995 bool reenable_incremental = FALSE;
3997 reenable_incremental = !objspace->flags.dont_incremental;
4000 objspace->flags.dont_incremental = TRUE;
4004 .objspace = objspace,
4005 .reenable_incremental = reenable_incremental,
4007 .callback = callback,
4011 .pages_counts = {0},
4018 rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
4020 objspace_each_objects(&rb_objspace, callback, data, FALSE);
4029 internal_object_p(VALUE obj)
4032 void *ptr = asan_unpoison_object_temporary(obj);
4033 bool used_p = p->as.basic.flags;
4038 UNEXPECTED_NODE(internal_object_p);
4047 if (!p->as.basic.klass) break;
4049 return rb_singleton_class_internal_p(obj);
4053 if (!p->as.basic.klass) break;
4057 if (ptr || ! used_p) {
4058 asan_poison_object(obj);
4064 rb_objspace_internal_object_p(VALUE obj)
4066 return internal_object_p(obj);
4070 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
4075 for (; v != (VALUE)vend; v += stride) {
4076 if (!internal_object_p(v)) {
4096 rb_objspace_each_objects(os_obj_of_i, &oes);
4143 return os_obj_of(of);
4157 return rb_undefine_finalizer(obj);
4164 st_data_t data = obj;
4166 st_delete(finalizer_table, &data, 0);
4172 should_be_callable(VALUE block)
4181 should_be_finalizable(VALUE obj)
4258 should_be_finalizable(obj);
4263 should_be_callable(block);
4266 if (rb_callable_receiver(block) == obj) {
4267 rb_warn("finalizer references object to be finalized");
4270 return define_final0(obj, block);
4282 if (st_lookup(finalizer_table, obj, &data)) {
4283 table = (VALUE)data;
4290 for (i = 0; i < len; i++) {
4299 rb_ary_push(table, block);
4303 RBASIC_CLEAR_CLASS(table);
4304 st_add_direct(finalizer_table, obj, table);
4315 should_be_finalizable(obj);
4316 should_be_callable(block);
4317 return define_final0(obj, block);
4328 if (st_lookup(finalizer_table, obj, &data)) {
4329 table = (VALUE)data;
4330 st_insert(finalizer_table, dest, table);
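/* Finalizers run after their object is swept: finalizer_table maps the object
 * to an array of callables, and run_finalizer() saves/restores the control
 * frame and errinfo around each call so an exception raised by one finalizer
 * is only warned about and the remaining finalizers still run. */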
4345 VALUE errinfo = ec->errinfo;
4346 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
4347 rb_ec_error_print(ec, errinfo);
4355 enum ruby_tag_type state;
4366#define RESTORE_FINALIZER() (\
4367 ec->cfp = saved.cfp, \
4368 ec->cfp->sp = saved.sp, \
4369 ec->errinfo = saved.errinfo)
4371 saved.errinfo = ec->errinfo;
4372 saved.objid = rb_obj_id(obj);
4373 saved.cfp = ec->cfp;
4374 saved.sp = ec->cfp->sp;
4379 state = EC_EXEC_TAG();
4380 if (state != TAG_NONE) {
4382 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4384 for (i = saved.finished;
4386 saved.finished = ++i) {
4387 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4390#undef RESTORE_FINALIZER
4396 st_data_t key, table;
4398 if (RZOMBIE(zombie)->dfree) {
4399 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4402 key = (st_data_t)zombie;
4403 if (st_delete(finalizer_table, &key, &table)) {
4404 run_finalizer(objspace, zombie, (VALUE)table);
4414 asan_unpoison_object(zombie, false);
4415 next_zombie = RZOMBIE(zombie)->next;
4416 page = GET_HEAP_PAGE(zombie);
4418 run_final(objspace, zombie);
4424 obj_free_object_id(objspace, zombie);
4427 GC_ASSERT(heap_pages_final_slots > 0);
4428 GC_ASSERT(page->final_slots > 0);
4430 heap_pages_final_slots--;
4431 page->final_slots--;
4433 heap_page_add_freeobj(objspace, page, zombie);
4434 objspace->profile.total_freed_objects++;
4438 zombie = next_zombie;
4446 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4447 finalize_list(objspace, zombie);
4455 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4456 finalize_deferred_heap_pages(objspace);
4457 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4461 gc_finalize_deferred(void *dmy)
4464 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4466 finalize_deferred(objspace);
4467 ATOMIC_SET(finalizing, 0);
4474 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4485force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4496 bool rb_obj_is_main_ractor(VALUE gv);
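/* VM shutdown path: flush all pending finalizers, then release every
 * remaining T_DATA/T_FILE object.  Threads, mutexes, fibers and the main
 * ractor are skipped because the runtime still needs them while exiting. */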
4503#if RGENGC_CHECK_MODE >= 2
4504 gc_verify_internal_consistency(objspace);
4508 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4511 finalize_deferred(objspace);
4512 GC_ASSERT(heap_pages_deferred_final == 0);
4516 objspace->flags.dont_incremental = 1;
4519 while (finalizer_table->num_entries) {
4521 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4524 st_data_t obj = (st_data_t)curr->obj;
4525 run_finalizer(objspace, curr->obj, curr->table);
4526 st_delete(finalizer_table, &obj, 0);
4536 unsigned int lock_lev;
4537 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4540 for (i = 0; i < heap_allocated_pages; i++) {
4541 struct heap_page *page = heap_pages_sorted[i];
4542 short stride = page->slot_size;
4544 uintptr_t p = (uintptr_t)page->start;
4545 uintptr_t pend = p + page->total_slots * stride;
4546 for (; p < pend; p += stride) {
4548 void *poisoned = asan_unpoison_object_temporary(vp);
4551 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4552 if (rb_obj_is_thread(vp)) break;
4553 if (rb_obj_is_mutex(vp)) break;
4554 if (rb_obj_is_fiber(vp)) break;
4555 if (rb_obj_is_main_ractor(vp)) break;
4557 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
4559 RANY(p)->as.free.flags = 0;
4563 else if (RANY(p)->as.data.dfree) {
4564 make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
4568 if (RANY(p)->as.file.fptr) {
4569 make_io_zombie(objspace, vp);
4577 asan_poison_object(vp);
4582 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4584 finalize_deferred_heap_pages(objspace);
4586 st_free_table(finalizer_table);
4587 finalizer_table = 0;
4588 ATOMIC_SET(finalizing, 0);
4594 struct heap_page *page = GET_HEAP_PAGE(ptr);
4595 return page->flags.before_sweep ? FALSE : TRUE;
4602 if (!is_lazy_sweeping(objspace) ||
4603 is_swept_object(objspace, ptr) ||
4604 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4625 if (!is_garbage_object(objspace, ptr)) {
4637 check_rvalue_consistency(obj);
4642 rb_objspace_markable_object_p(VALUE obj)
4645 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
4649 rb_objspace_garbage_object_p(VALUE obj)
4652 return is_garbage_object(objspace, obj);
4659 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4685#if SIZEOF_LONG == SIZEOF_VOIDP
4686#define NUM2PTR(x) NUM2ULONG(x)
4687#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4688#define NUM2PTR(x) NUM2ULL(x)
4696 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4697 ptr = NUM2PTR(objid);
4704 ptr = obj_id_to_ref(objid);
4705 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4708 if (!rb_static_id_valid_p(symid))
4714 if (!UNDEF_P(orig = id2ref_obj_tbl(objspace, objid)) &&
4715 is_live_object(objspace, orig)) {
4721 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4725 if (rb_int_ge(objid, objspace->next_object_id)) {
4737 return id2ref(objid);
4747#if SIZEOF_LONG == SIZEOF_VOIDP
4757 return get_heap_object_id(obj);
4761 cached_object_id(VALUE obj)
4767 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4773 id = objspace->next_object_id;
4774 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4776 VALUE already_disabled = rb_gc_disable_no_rest();
4777 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4778 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4779 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4788 nonspecial_obj_id_(VALUE obj)
4790 return nonspecial_obj_id(obj);
4797 return rb_find_object_id(obj, nonspecial_obj_id_);
4859 return rb_find_object_id(obj, cached_object_id);
4862 static enum rb_id_table_iterator_result
4863 cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
4865 size_t *total_size = data_ptr;
4867 *total_size += sizeof(*ccs);
4868 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
4869 return ID_TABLE_CONTINUE;
4875 size_t total = rb_id_table_memsize(cc_table);
4876 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
4881 obj_memsize_of(VALUE obj, int use_all_types)
4890 size += rb_generic_ivar_memsize(obj);
4895 if (rb_shape_obj_too_complex(obj)) {
4896 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
4898 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
4899 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
4904 if (RCLASS_EXT(obj)) {
4905 if (RCLASS_M_TBL(obj)) {
4906 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4909 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
4910 if (RCLASS_CVC_TBL(obj)) {
4911 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
4913 if (RCLASS_EXT(obj)->const_tbl) {
4914 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
4916 if (RCLASS_CC_TBL(obj)) {
4917 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4919 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
4920 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
4922#if SIZE_POOL_COUNT == 1
4928 if (RICLASS_OWNS_M_TBL_P(obj)) {
4929 if (RCLASS_M_TBL(obj)) {
4930 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4933 if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4934 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4938 size += rb_str_memsize(obj);
4941 size += rb_ary_memsize(obj);
4944 if (RHASH_AR_TABLE_P(obj)) {
4945 if (RHASH_AR_TABLE(obj) != NULL) {
4946 size_t rb_hash_ar_table_size(void);
4947 size += rb_hash_ar_table_size();
4951 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4952 size += st_memsize(RHASH_ST_TABLE(obj));
4961 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
4966 size += onig_region_memsize(&rm->regs);
4968 size += sizeof(struct rmatch);
4972 if (RFILE(obj)->fptr) {
4973 size += rb_io_memsize(RFILE(obj)->fptr);
4980 size += imemo_memsize(obj);
4988 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
4989 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
4994 UNEXPECTED_NODE(obj_memsize_of);
4998 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
4999 RSTRUCT(obj)->as.heap.ptr) {
5000 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
5009 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
5013 return size + rb_gc_obj_slot_size(obj);
5017 rb_obj_memsize_of(VALUE obj)
5019 return obj_memsize_of(obj, TRUE);
5023 set_zero(st_data_t key, st_data_t val, st_data_t arg)
5027 rb_hash_aset(hash, k, INT2FIX(0));
5032 type_sym(size_t type)
5035#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
5104 count_objects(int argc, VALUE *argv, VALUE os)
5119 for (i = 0; i <= T_MASK; i++) {
5123 for (i = 0; i < heap_allocated_pages; i++) {
5124 struct heap_page *page = heap_pages_sorted[i];
5125 short stride = page->slot_size;
5127 uintptr_t p = (uintptr_t)page->start;
5128 uintptr_t pend = p + page->total_slots * stride;
5129 for (;p < pend; p += stride) {
5131 GC_ASSERT((NUM_IN_PAGE(vp) * BASE_SLOT_SIZE) % page->slot_size == 0);
5133 void *poisoned = asan_unpoison_object_temporary(vp);
5134 if (RANY(p)->as.basic.flags) {
5142 asan_poison_object(vp);
5145 total += page->total_slots;
5149 hash = rb_hash_new();
5152 rb_hash_stlike_foreach(hash, set_zero, hash);
5157 for (i = 0; i <= T_MASK; i++) {
5175 size_t total_slots = 0;
5176 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5178 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
5179 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5187 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
5193 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
5197 gc_setup_mark_bits(struct heap_page *page)
5200 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
5207enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
5213 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
5216enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
5217#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
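/* Read barrier for compaction: already-compacted pages are protected
 * (PROT_NONE / PAGE_NOACCESS) so any stray access traps; the fault handler
 * then unprotects the page and invalidates the moved objects on it. */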
5223 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
5224 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
5227 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
5234 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
5235 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
5238 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
5245 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5247 struct heap_page *src_page = GET_HEAP_PAGE(src);
5255 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
5257 asan_unlock_freelist(free_page);
5259 asan_lock_freelist(free_page);
5260 asan_unpoison_object(dest, false);
5266 free_page->freelist = RANY(dest)->as.free.next;
5270 if (src_page->slot_size > free_page->slot_size) {
5271 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
5273 else if (free_page->slot_size > src_page->slot_size) {
5274 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
5276 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
5277 objspace->rcompactor.total_moved++;
5279 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
5280 gc_pin(objspace, src);
5281 free_page->free_slots--;
5289 struct heap_page *cursor = heap->compact_cursor;
5292 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5293 cursor = ccan_list_next(&heap->pages, cursor, page_node);
5300#ifndef GC_CAN_COMPILE_COMPACTION
5301#if defined(__wasi__)
5302# define GC_CAN_COMPILE_COMPACTION 0
5304# define GC_CAN_COMPILE_COMPACTION 1
5308#if defined(__MINGW32__) || defined(_WIN32)
5309# define GC_COMPACTION_SUPPORTED 1
5313# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
5316#if GC_CAN_COMPILE_COMPACTION
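/* Fault handler shared by the SEGV/SIGBUS paths below: round the faulting
 * address down to its slot, unlock the containing page, and invalidate the
 * T_MOVED forwarding objects on it so the mutator sees the original data. */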
5318read_barrier_handler(uintptr_t original_address)
5324 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
5326 obj = (VALUE)address;
5332 if (page_body == NULL) {
5333 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
5338 unlock_page_body(objspace, page_body);
5340 objspace->profile.read_barrier_faults++;
5342 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5348#if !GC_CAN_COMPILE_COMPACTION
5350 uninstall_handlers(void)
5356 install_handlers(void)
5360#elif defined(_WIN32)
5361static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5362typedef void (*signal_handler)(int);
5363static signal_handler old_sigsegv_handler;
5366read_barrier_signal(EXCEPTION_POINTERS * info)
5369 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5374 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5375 return EXCEPTION_CONTINUE_EXECUTION;
5378 return EXCEPTION_CONTINUE_SEARCH;
5383 uninstall_handlers(void)
5385 signal(SIGSEGV, old_sigsegv_handler);
5386 SetUnhandledExceptionFilter(old_handler);
5390 install_handlers(void)
5393 old_sigsegv_handler = signal(SIGSEGV, NULL);
5396 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5399static struct sigaction old_sigbus_handler;
5400static struct sigaction old_sigsegv_handler;
5402#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5403static exception_mask_t old_exception_masks[32];
5404static mach_port_t old_exception_ports[32];
5405static exception_behavior_t old_exception_behaviors[32];
5406static thread_state_flavor_t old_exception_flavors[32];
5407static mach_msg_type_number_t old_exception_count;
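/* On macOS, EXC_BAD_ACCESS is normally delivered through Mach exception
 * ports before it ever becomes a SIGBUS/SIGSEGV, so the ports are swapped
 * out while the read barrier is armed and restored around the handler. */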
5410 disable_mach_bad_access_exc(void)
5412 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
5413 task_swap_exception_ports(
5414 mach_task_self(), EXC_MASK_BAD_ACCESS,
5415 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
5416 old_exception_masks, &old_exception_count,
5417 old_exception_ports, old_exception_behaviors, old_exception_flavors
5422 restore_mach_bad_access_exc(void)
5424 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
5425 task_set_exception_ports(
5427 old_exception_masks[i], old_exception_ports[i],
5428 old_exception_behaviors[i], old_exception_flavors[i]
5435 read_barrier_signal(int sig, siginfo_t * info, void * data)
5438 struct sigaction prev_sigbus, prev_sigsegv;
5439 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5440 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5443 sigset_t set, prev_set;
5445 sigaddset(&set, SIGBUS);
5446 sigaddset(&set, SIGSEGV);
5447 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5448#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5449 disable_mach_bad_access_exc();
5452 read_barrier_handler((uintptr_t)info->si_addr);
5455#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5456 restore_mach_bad_access_exc();
5458 sigaction(SIGBUS, &prev_sigbus, NULL);
5459 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5460 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5464 uninstall_handlers(void)
5466#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5467 restore_mach_bad_access_exc();
5469 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5470 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5474 install_handlers(void)
5476 struct sigaction action;
5477 memset(&action, 0, sizeof(struct sigaction));
5478 sigemptyset(&action.sa_mask);
5479 action.sa_sigaction = read_barrier_signal;
5480 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5482 sigaction(SIGBUS, &action, &old_sigbus_handler);
5483 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5484#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5485 disable_mach_bad_access_exc();
5491 revert_stack_objects(VALUE stack_obj, void *ctx)
5499 invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
5506 if (is_pointer_to_heap(objspace, (void *)v)) {
5511 invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
5522 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5523 rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
5524 each_machine_stack_value(ec, revert_machine_stack_references);
5527 static void gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode);
5532 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5534 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5535 gc_unprotect_pages(objspace, heap);
5538 uninstall_handlers();
5545 check_stack_for_moved(objspace);
5547 gc_update_references(objspace);
5548 objspace->profile.compact_count++;
5550 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5552 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5553 heap->compact_cursor = NULL;
5554 heap->free_pages = NULL;
5555 heap->compact_cursor_index = 0;
5558 if (gc_prof_enabled(objspace)) {
5560 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5562 objspace->flags.during_compacting = FALSE;
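/* Sweeping walks each page's mark bitmap one machine word ("plane") at a
 * time: unmarked slots are freed via obj_free() or turned into zombies, and
 * freed slots are pushed back onto the page's freelist. */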
5575 struct heap_page * sweep_page = ctx->page;
5576 short slot_size = sweep_page->slot_size;
5577 short slot_bits = slot_size / BASE_SLOT_SIZE;
5578 GC_ASSERT(slot_bits > 0);
5582 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5584 asan_unpoison_object(vp, false);
5588 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5589#if RGENGC_CHECK_MODE
5590 if (!is_full_marking(objspace)) {
5591 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5592 if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5595 if (obj_free(objspace, vp)) {
5598 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
5599 heap_page_add_freeobj(objspace, sweep_page, vp);
5600 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5609 if (objspace->flags.during_compacting) {
5615 rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
5617 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5619 heap_page_add_freeobj(objspace, sweep_page, vp);
5630 bitset >>= slot_bits;
5637 struct heap_page *sweep_page = ctx->page;
5638 GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);
5641 bits_t *bits, bitset;
5643 gc_report(2, objspace, "page_sweep: start.\n");
5645#if RGENGC_CHECK_MODE
5646 if (!objspace->flags.immediate_sweep) {
5647 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
5650 sweep_page->flags.before_sweep = FALSE;
5651 sweep_page->free_slots = 0;
5653 p = (uintptr_t)sweep_page->start;
5654 bits = sweep_page->mark_bits;
5656 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
5657 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5658 if (out_of_range_bits != 0) {
5659 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5665 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
5666 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
5667 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
5671 bitset >>= NUM_IN_PAGE(p);
5673 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5675 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5677 for (int i = 1; i < bitmap_plane_count; i++) {
5680 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5682 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5685 if (!heap->compact_cursor) {
5686 gc_setup_mark_bits(sweep_page);
5689#if GC_PROFILE_MORE_DETAIL
5690 if (gc_prof_enabled(objspace)) {
5692 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5693 record->empty_objects += ctx->empty_slots;
5696 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5698 sweep_page->total_slots,
5699 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5701 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5702 objspace->profile.total_freed_objects += ctx->freed_slots;
5704 if (heap_pages_deferred_final && !finalizing) {
5707 gc_finalize_deferred_register(objspace);
5711#if RGENGC_CHECK_MODE
5712 short freelist_len = 0;
5713 asan_unlock_freelist(sweep_page);
5714 RVALUE *ptr = sweep_page->freelist;
5717 ptr = ptr->as.free.next;
5719 asan_lock_freelist(sweep_page);
5720 if (freelist_len != sweep_page->free_slots) {
5721 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5725 gc_report(2, objspace, "page_sweep: end.\n");
5733 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5734 if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
5736 size_pool_allocatable_pages_set(objspace, size_pool, 1);
5737 if (!heap_increment(objspace, size_pool, heap)) {
5746 gc_mode_name(enum gc_mode mode)
5749 case gc_mode_none: return "none";
5750 case gc_mode_marking: return "marking";
5751 case gc_mode_sweeping: return "sweeping";
5752 case gc_mode_compacting: return "compacting";
5753 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5758 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5760#if RGENGC_CHECK_MODE
5761 enum gc_mode prev_mode = gc_mode(objspace);
5762 switch (prev_mode) {
5763 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5764 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5765 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
5766 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
5769 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5770 gc_mode_set(objspace, mode);
5777 asan_unlock_freelist(page);
5778 if (page->freelist) {
5779 RVALUE *p = page->freelist;
5780 asan_unpoison_object((VALUE)p, false);
5781 while (p->as.free.next) {
5783 p = p->as.free.next;
5784 asan_poison_object((VALUE)prev);
5785 asan_unpoison_object((VALUE)p, false);
5787 p->as.free.next = freelist;
5788 asan_poison_object((VALUE)p);
5791 page->freelist = freelist;
5793 asan_lock_freelist(page);
5800 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
5801 heap->free_pages = NULL;
5802#if GC_ENABLE_INCREMENTAL_MARK
5803 heap->pooled_pages = NULL;
5805 if (!objspace->flags.immediate_sweep) {
5808 ccan_list_for_each(&heap->pages, page, page_node) {
5809 page->flags.before_sweep = TRUE;
5814#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5820 gc_mode_transition(objspace, gc_mode_sweeping);
5822#if GC_ENABLE_INCREMENTAL_MARK
5823 objspace->rincgc.pooled_slots = 0;
5826 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5828 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5830 gc_sweep_start_heap(objspace, heap);
5834 if (heap->sweeping_page == NULL) {
5835 GC_ASSERT(heap->total_pages == 0);
5836 GC_ASSERT(heap->total_slots == 0);
5837 gc_sweep_finish_size_pool(objspace, size_pool);
5843 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5844 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5852 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5853 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5854 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5855 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5857 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5864 while ((swept_slots < min_free_slots || swept_slots < gc_params.heap_init_slots) &&
5865 (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
5866 swept_slots += resurrected_page->free_slots;
5868 heap_add_page(objspace, size_pool, heap, resurrected_page);
5869 heap_add_freepage(heap, resurrected_page);
5874 if (min_free_slots < gc_params.heap_init_slots && swept_slots < gc_params.heap_init_slots) {
5875 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
5876 size_t extra_slots = gc_params.heap_init_slots - swept_slots;
5877 size_t extend_page_count = CEILDIV(extra_slots * multiple, HEAP_PAGE_OBJ_LIMIT);
5878 if (extend_page_count > size_pool->allocatable_pages) {
5879 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5883 if (swept_slots < min_free_slots) {
5884 bool grow_heap = is_full_marking(objspace);
5886 if (!is_full_marking(objspace)) {
5889 bool is_growth_heap = (size_pool->empty_slots == 0 ||
5890 size_pool->freed_slots > size_pool->empty_slots) &&
5891 size_pool->allocatable_pages == 0;
5893 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5896 else if (is_growth_heap) {
5897 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5898 size_pool->force_major_gc_count++;
5903 size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);
5905 if (extend_page_count > size_pool->allocatable_pages) {
5906 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5916 gc_report(1, objspace, "gc_sweep_finish\n");
5918 gc_prof_set_heap_info(objspace);
5919 heap_pages_free_unused_pages(objspace);
5921 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5925 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5926 if (size_pool->allocatable_pages < tomb_pages) {
5927 size_pool->allocatable_pages = tomb_pages;
5931 size_pool->freed_slots = 0;
5932 size_pool->empty_slots = 0;
5934#if GC_ENABLE_INCREMENTAL_MARK
5935 if (!will_be_incremental_marking(objspace)) {
5936 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
5937 struct heap_page *end_page = eden_heap->free_pages;
5939 while (end_page->free_next) end_page = end_page->free_next;
5940 end_page->free_next = eden_heap->pooled_pages;
5943 eden_heap->free_pages = eden_heap->pooled_pages;
5945 eden_heap->pooled_pages = NULL;
5946 objspace->rincgc.pooled_slots = 0;
5951 heap_pages_expand_sorted(objspace);
5954 gc_mode_transition(objspace, gc_mode_none);
5960 struct heap_page *sweep_page = heap->sweeping_page;
5961 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
5963#if GC_ENABLE_INCREMENTAL_MARK
5964 int swept_slots = 0;
5966 bool need_pool = TRUE;
5968 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5971 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
5973 gc_report(2, objspace, "gc_sweep_step\n");
5976 if (sweep_page == NULL) return FALSE;
5978#if GC_ENABLE_LAZY_SWEEP
5979 gc_prof_sweep_timer_start(objspace);
5983 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
5991 gc_sweep_page(objspace, heap, &ctx);
5992 int free_slots = ctx.freed_slots + ctx.empty_slots;
5994 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
5996 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5997 heap_pages_freeable_pages > 0 &&
5999 heap_pages_freeable_pages--;
6002 heap_unlink_page(objspace, heap, sweep_page);
6003 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
6005 else if (free_slots > 0) {
6007 size_pool->freed_slots += ctx.freed_slots;
6008 size_pool->empty_slots += ctx.empty_slots;
6011#if GC_ENABLE_INCREMENTAL_MARK
6013 heap_add_poolpage(objspace, heap, sweep_page);
6017 heap_add_freepage(heap, sweep_page);
6018 swept_slots += free_slots;
6019 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
6024 heap_add_freepage(heap, sweep_page);
6029 sweep_page->free_next = NULL;
6031 } while ((sweep_page = heap->sweeping_page));
6033 if (!heap->sweeping_page) {
6035 gc_sweep_finish_size_pool(objspace, size_pool);
6038 if (!has_sweeping_pages(objspace)) {
6039 gc_sweep_finish(objspace);
6043#if GC_ENABLE_LAZY_SWEEP
6044 gc_prof_sweep_timer_stop(objspace);
6047 return heap->free_pages != NULL;
6053 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6056 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
6057 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6065 GC_ASSERT(dont_gc_val() == FALSE);
6066 if (!GC_ENABLE_LAZY_SWEEP) return;
6068 unsigned int lock_lev;
6069 gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
6071 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6073 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
6076 if (size_pool == sweep_size_pool) {
6077 if (size_pool->allocatable_pages > 0) {
6078 heap_increment(objspace, size_pool, heap);
6082 gc_sweep_rest(objspace);
6090 gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
6103 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
6104 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6106 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
6108 object = rb_gc_location(forwarding_object);
6110 shape_id_t original_shape_id = 0;
6112 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
6115 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
6119 if (original_shape_id) {
6120 ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
6123 struct heap_page *orig_page = GET_HEAP_PAGE(object);
6124 orig_page->free_slots++;
6125 heap_page_add_freeobj(objspace, orig_page, object);
6127 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6132 p += BASE_SLOT_SIZE;
6142 bits_t *mark_bits, *pin_bits;
6145 mark_bits = page->mark_bits;
6146 pin_bits = page->pinned_bits;
6148 uintptr_t p = page->start;
6151 bitset = pin_bits[0] & ~mark_bits[0];
6152 bitset >>= NUM_IN_PAGE(p);
6153 invalidate_moved_plane(objspace, page, p, bitset);
6154 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
6156 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
6159 bitset = pin_bits[i] & ~mark_bits[i];
6161 invalidate_moved_plane(objspace, page, p, bitset);
6162 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
6170 gc_mode_transition(objspace, gc_mode_compacting);
6172 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6173 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
6174 ccan_list_for_each(&heap->pages, page, page_node) {
6175 page->flags.before_sweep = TRUE;
6178 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
6179 heap->compact_cursor_index = 0;
6182 if (gc_prof_enabled(objspace)) {
6184 record->moved_objects = objspace->rcompactor.total_moved;
6187 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
6188 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
6189 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
6190 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
6201 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
6203 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
6205 gc_sweep_start(objspace);
6206 if (objspace->flags.during_compacting) {
6207 gc_sweep_compact(objspace);
6210 if (immediate_sweep) {
6211#if !GC_ENABLE_LAZY_SWEEP
6212 gc_prof_sweep_timer_start(objspace);
6214 gc_sweep_rest(objspace);
6215#if !GC_ENABLE_LAZY_SWEEP
6216 gc_prof_sweep_timer_stop(objspace);
6222 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6224 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6230 gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
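/* The mark stack is a linked list of fixed-size chunks; emptied chunks are
 * recycled into a small cache so marking does not hit malloc on every
 * overflow of the current chunk. */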
6237 stack_chunk_alloc(void)
6251 return stack->chunk == NULL;
6257 size_t size = stack->index;
6258 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
6261 size += stack->limit;
6262 chunk = chunk->next;
6270 chunk->next = stack->cache;
6271 stack->cache = chunk;
6272 stack->cache_size++;
6280 if (stack->unused_cache_size > (stack->cache_size/2)) {
6281 chunk = stack->cache;
6282 stack->cache = stack->cache->next;
6283 stack->cache_size--;
6286 stack->unused_cache_size = stack->cache_size;
6294 GC_ASSERT(stack->index == stack->limit);
6296 if (stack->cache_size > 0) {
6297 next = stack->cache;
6298 stack->cache = stack->cache->next;
6299 stack->cache_size--;
6300 if (stack->unused_cache_size > stack->cache_size)
6301 stack->unused_cache_size = stack->cache_size;
6304 next = stack_chunk_alloc();
6306 next->next = stack->chunk;
6307 stack->chunk = next;
6316 prev = stack->chunk->next;
6317 GC_ASSERT(stack->index == 0);
6318 add_stack_chunk_cache(stack, stack->chunk);
6319 stack->chunk = prev;
6320 stack->index = stack->limit;
6328 while (chunk != NULL) {
6338 mark_stack_chunk_list_free(stack->chunk);
6344 mark_stack_chunk_list_free(stack->cache);
6345 stack->cache_size = 0;
6346 stack->unused_cache_size = 0;
6374 if (stack->index == stack->limit) {
6375 push_mark_stack_chunk(stack);
6377 stack->chunk->data[stack->index++] = data;
6387 rb_bug("push_mark_stack() called for broken object");
6391 UNEXPECTED_NODE(push_mark_stack);
6395 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6397 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6403 if (is_mark_stack_empty(stack)) {
6406 if (stack->index == 1) {
6407 *data = stack->chunk->data[--stack->index];
6408 pop_mark_stack_chunk(stack);
6411 *data = stack->chunk->data[--stack->index];
6422 stack->index = stack->limit = STACK_CHUNK_SIZE;
6424 for (i=0; i < 4; i++) {
6425 add_stack_chunk_cache(stack, stack_chunk_alloc());
6427 stack->unused_cache_size = stack->cache_size;
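/* Conservative machine-stack scanning: the macros below compute the live
 * extent of the C stack (growth-direction dependent) so every word in it can
 * be treated as a possible VALUE and handed to gc_mark_maybe(). */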
6432#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6434#define STACK_START (ec->machine.stack_start)
6435#define STACK_END (ec->machine.stack_end)
6436#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6438#if STACK_GROW_DIRECTION < 0
6439# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6440#elif STACK_GROW_DIRECTION > 0
6441# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6443# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6444 : (size_t)(STACK_END - STACK_START + 1))
6446#if !STACK_GROW_DIRECTION
6447int ruby_stack_grow_direction;
6449 ruby_get_stack_grow_direction(volatile VALUE *addr)
6452 SET_MACHINE_STACK_END(&end);
6454 if (end > addr) return ruby_stack_grow_direction = 1;
6455 return ruby_stack_grow_direction = -1;
6464 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6465 return STACK_LENGTH;
6468#define PREVENT_STACK_OVERFLOW 1
6469#ifndef PREVENT_STACK_OVERFLOW
6470#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6471# define PREVENT_STACK_OVERFLOW 1
6473# define PREVENT_STACK_OVERFLOW 0
6476#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6482 size_t length = STACK_LENGTH;
6483 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6485 return length > maximum_length;
6488#define stack_check(ec, water_mark) FALSE
6491#define STACKFRAME_FOR_CALL_CFUNC 2048
6493 MJIT_FUNC_EXPORTED int
6496 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6502 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6522 if (end <= start) return;
6524 each_location(objspace, start, n, cb);
6530 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6538 for (i=0; i<n; i++) {
6539 gc_mark(objspace, values[i]);
6544 rb_gc_mark_values(long n, const VALUE *values)
6549 for (i=0; i<n; i++) {
6550 gc_mark_and_pin(objspace, values[i]);
6559 for (i=0; i<n; i++) {
6560 if (is_markable_object(objspace, values[i])) {
6561 gc_mark_and_pin(objspace, values[i]);
6567 rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6570 gc_mark_stack_values(objspace, n, values);
6574mark_value(st_data_t key, st_data_t value, st_data_t data)
6577 gc_mark(objspace, (VALUE)value);
6582 mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6585 gc_mark_and_pin(objspace, (VALUE)value);
6592 if (!tbl || tbl->num_entries == 0) return;
6593 st_foreach(tbl, mark_value, (st_data_t)objspace);
6599 if (!tbl || tbl->num_entries == 0) return;
6600 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6604 mark_key(st_data_t key, st_data_t value, st_data_t data)
6607 gc_mark_and_pin(objspace, (VALUE)key);
6615 st_foreach(tbl, mark_key, (st_data_t)objspace);
6619 pin_value(st_data_t key, st_data_t value, st_data_t data)
6622 gc_mark_and_pin(objspace, (VALUE)value);
6630 st_foreach(tbl, pin_value, (st_data_t)objspace);
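/* Hash contents: for compare_by_identity hashes the keys hash by address, so
 * they are pinned (marked and excluded from compaction), while ordinary hash
 * keys and values are only marked and may be moved. */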
6640 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6644 gc_mark(objspace, (VALUE)key);
6645 gc_mark(objspace, (VALUE)value);
6650 pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6654 gc_mark_and_pin(objspace, (VALUE)key);
6655 gc_mark_and_pin(objspace, (VALUE)value);
6660 pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6664 gc_mark_and_pin(objspace, (VALUE)key);
6665 gc_mark(objspace, (VALUE)value);
6672 if (rb_hash_compare_by_id_p(hash)) {
6673 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6676 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6679 if (RHASH_AR_TABLE_P(hash)) {
6680 if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
6681 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
6685 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
6687 gc_mark(objspace, RHASH(hash)->ifnone);
6694 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6708 gc_mark(objspace, me->owner);
6709 gc_mark(objspace, me->defined_class);
6712 switch (def->type) {
6713 case VM_METHOD_TYPE_ISEQ:
6715 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6717 if (def->iseq_overload && me->defined_class) {
6720 gc_mark_and_pin(objspace, (VALUE)me);
6723 case VM_METHOD_TYPE_ATTRSET:
6724 case VM_METHOD_TYPE_IVAR:
6725 gc_mark(objspace, def->body.attr.location);
6727 case VM_METHOD_TYPE_BMETHOD:
6728 gc_mark(objspace, def->body.bmethod.proc);
6729 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6731 case VM_METHOD_TYPE_ALIAS:
6732 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6734 case VM_METHOD_TYPE_REFINED:
6735 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6736 gc_mark(objspace, (VALUE)def->body.refined.owner);
6738 case VM_METHOD_TYPE_CFUNC:
6739 case VM_METHOD_TYPE_ZSUPER:
6740 case VM_METHOD_TYPE_MISSING:
6741 case VM_METHOD_TYPE_OPTIMIZED:
6742 case VM_METHOD_TYPE_UNDEF:
6743 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6749 static enum rb_id_table_iterator_result
6750 mark_method_entry_i(VALUE me, void *data)
6754 gc_mark(objspace, me);
6755 return ID_TABLE_CONTINUE;
6762 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6766 static enum rb_id_table_iterator_result
6767 mark_const_entry_i(VALUE value, void *data)
6772 gc_mark(objspace, ce->value);
6773 gc_mark(objspace, ce->file);
6774 return ID_TABLE_CONTINUE;
6781 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6784#if STACK_GROW_DIRECTION < 0
6785#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6786#elif STACK_GROW_DIRECTION > 0
6787#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6789#define GET_STACK_BOUNDS(start, end, appendix) \
6790 ((STACK_END < STACK_START) ? \
6791 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6797#if defined(__wasm__)
6800static VALUE *rb_stack_range_tmp[2];
6803 rb_mark_locations(void *begin, void *end)
6805 rb_stack_range_tmp[0] = begin;
6806 rb_stack_range_tmp[1] = end;
6809# if defined(__EMSCRIPTEN__)
6814 emscripten_scan_stack(rb_mark_locations);
6815 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6817 emscripten_scan_registers(rb_mark_locations);
6818 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6825 VALUE *stack_start, *stack_end;
6827 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6828 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6830 rb_wasm_scan_locals(rb_mark_locations);
6831 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6843 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6844 } save_regs_gc_mark;
6845 VALUE *stack_start, *stack_end;
6847 FLUSH_REGISTER_WINDOWS;
6848 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6850 rb_setjmp(save_regs_gc_mark.j);
6856 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6858 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6860 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6868 VALUE *stack_start, *stack_end;
6870 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6871 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6877 each_machine_stack_value(ec, gc_mark_maybe);
6885 gc_mark_locations(objspace, stack_start, stack_end, cb);
6887#if defined(__mc68000__)
6888 gc_mark_locations(objspace,
6889 (VALUE*)((char*)stack_start + 2),
6890 (VALUE*)((char*)stack_end - 2), cb);
6909 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6911 if (is_pointer_to_heap(objspace, (void *)obj)) {
6912 void *ptr = asan_unpoison_object_temporary(obj);
6920 gc_mark_and_pin(objspace, obj);
6926 asan_poison_object(obj);
6940 ASSERT_vm_locking();
6941 if (RVALUE_MARKED(obj)) return 0;
6942 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *uncollectible_bits = &page->uncollectible_bits[0];

    if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
        page->flags.has_uncollectible_shady_objects = TRUE;
        MARK_IN_BITMAP(uncollectible_bits, obj);
        objspace->rgengc.uncollectible_wb_unprotected_objects++;

#if RGENGC_PROFILE > 0
        objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
#endif
        return TRUE;
    }
    else {
        return FALSE;
    }
    const VALUE old_parent = objspace->rgengc.parent_object;

    if (old_parent) { /* parent object is old */
        if (RVALUE_WB_UNPROTECTED(obj)) {
            if (gc_remember_unprotected(objspace, obj)) {
                gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
            }
        }
        else {
            if (!RVALUE_OLD_P(obj)) {
                if (RVALUE_MARKED(obj)) {
                    /* An object pointed from an OLD object should be OLD. */
                    gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                    RVALUE_AGE_SET_OLD(objspace, obj);
                    if (is_incremental_marking(objspace)) {
                        if (!RVALUE_MARKING(obj)) {
                            gc_grey(objspace, obj);
                        }
                    }
                    else {
                        rgengc_remember(objspace, obj);
                    }
                }
                else {
                    gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                    RVALUE_AGE_SET_CANDIDATE(objspace, obj);
                }
            }
        }
    }

    GC_ASSERT(old_parent == objspace->rgengc.parent_object);
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
    if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
#endif

#if GC_ENABLE_INCREMENTAL_MARK
    if (is_incremental_marking(objspace)) {
        MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
    }
#endif

    push_mark_stack(&objspace->mark_stack, obj);
}
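/*
 * gc_grey() is the "shade grey" step of the tri-color scheme: the object is
 * already in the mark bitmap, additionally gets its marking bit set while an
 * incremental mark is in progress, and is pushed onto the mark stack so its
 * children are traced later by gc_mark_stacked_objects().
 */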
    struct heap_page *page = GET_HEAP_PAGE(obj);

    GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
    check_rvalue_consistency(obj);

    if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
        if (!RVALUE_OLD_P(obj)) {
            gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
            RVALUE_AGE_INC(objspace, obj);
        }
        else if (is_full_marking(objspace)) {
            GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
            RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
        }
    }
    check_rvalue_consistency(obj);

    objspace->marked_slots++;
}
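/*
 * Each survived mark increments the object's age (up to RVALUE_OLD_AGE).
 * During a full mark, objects that are already old are also recorded in the
 * page's uncollectible bitmap so later minor GCs can treat them as live
 * without re-tracing them.
 */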
static void reachable_objects_from_callback(VALUE obj);

static void
gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
{
    if (LIKELY(during_gc)) {
        rgengc_check_relation(objspace, obj);
        if (!gc_mark_set(objspace, obj)) return; /* already marked */

        if (0) { /* for debugging GC marking misses */
            if (objspace->rgengc.parent_object) {
                RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
                               (void *)obj, obj_type_name(obj),
                               (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
            }
            else {
                RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
            }
        }

        if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
            rb_bug("try to mark T_NONE object");
        }
        gc_aging(objspace, obj);
        gc_grey(objspace, obj);
    }
    else {
        reachable_objects_from_callback(obj);
    }
}
    GC_ASSERT(is_markable_object(objspace, obj));
    if (UNLIKELY(objspace->flags.during_compacting)) {
        if (LIKELY(during_gc)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
        }
    }
}

static inline void
gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
{
    if (!is_markable_object(objspace, obj)) return;
    gc_pin(objspace, obj);
    gc_mark_ptr(objspace, obj);
}

static inline void
gc_mark(rb_objspace_t *objspace, VALUE obj)
{
    if (!is_markable_object(objspace, obj)) return;
    gc_mark_ptr(objspace, obj);
}
int
rb_objspace_marked_object_p(VALUE obj)
{
    return RVALUE_MARKED(obj) ? TRUE : FALSE;
}

static inline void
gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
{
    if (RVALUE_OLD_P(obj)) objspace->rgengc.parent_object = obj;
    else objspace->rgengc.parent_object = Qfalse;
}
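/*
 * rgengc.parent_object records the old object currently being traced so that
 * rgengc_check_relation() can notice old-to-young edges discovered during
 * marking and either promote the young child or put it in the remembered set.
 */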
7142 switch (imemo_type(obj)) {
7147 if (LIKELY(env->ep)) {
7149 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
7150 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
7151 gc_mark_values(objspace, (
long)env->env_size, env->env);
7152 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
7153 gc_mark(objspace, (
VALUE)rb_vm_env_prev_env(env));
7154 gc_mark(objspace, (
VALUE)env->iseq);
7159 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
7160 gc_mark(objspace, (
VALUE)RANY(obj)->as.imemo.cref.next);
7161 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
7164 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
7165 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
7166 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
7167 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
7169 case imemo_throw_data:
7170 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
7173 gc_mark_maybe(objspace, (
VALUE)RANY(obj)->as.imemo.ifunc.data);
7176 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
7177 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
7178 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
7181 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
7190 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
7191 }
while ((m = m->next) != NULL);
7195 rb_ast_mark(&RANY(obj)->as.imemo.ast);
7197 case imemo_parser_strterm:
7198 rb_strterm_mark(obj);
7200 case imemo_callinfo:
7202 case imemo_callcache:
7206 gc_mark(objspace, (
VALUE)vm_cc_cme(cc));
7209 case imemo_constcache:
7212 gc_mark(objspace, ice->value);
7215#if VM_CHECK_MODE > 0
7217 VM_UNREACHABLE(gc_mark_imemo);
7227 register RVALUE *any = RANY(obj);
7228 gc_mark_set_parent(objspace, obj);
7231 rb_mark_generic_ivar(obj);
        rb_bug("rb_gc_mark() called for broken object");
7248 UNEXPECTED_NODE(rb_gc_mark);
7252 gc_mark_imemo(objspace, obj);
7259 gc_mark(objspace, any->as.basic.
klass);
7267 if (!RCLASS_EXT(obj))
break;
7269 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7270 mark_cvc_tbl(objspace, obj);
7271 cc_table_mark(objspace, obj);
7272 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
7273 gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
7275 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
7279 if (RICLASS_OWNS_M_TBL_P(obj)) {
7280 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7285 if (!RCLASS_EXT(obj))
break;
7287 if (RCLASS_INCLUDER(obj)) {
7288 gc_mark(objspace, RCLASS_INCLUDER(obj));
7290 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
7291 cc_table_mark(objspace, obj);
7295 if (ARY_SHARED_P(obj)) {
7296 VALUE root = ARY_SHARED_ROOT(obj);
7297 gc_mark(objspace, root);
7302 for (i=0; i < len; i++) {
7303 gc_mark(objspace, ptr[i]);
7306 if (LIKELY(during_gc)) {
7308 rb_transient_heap_mark(obj, ptr);
7315 mark_hash(objspace, obj);
7319 if (STR_SHARED_P(obj)) {
7331 if (mark_func) (*mark_func)(ptr);
7338 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
7339 if (rb_shape_obj_too_complex(obj)) {
7340 mark_tbl_no_pin(objspace, ROBJECT_IV_HASH(obj));
7345 uint32_t i, len = ROBJECT_IV_COUNT(obj);
7346 for (i = 0; i < len; i++) {
7347 gc_mark(objspace, ptr[i]);
7350 if (LIKELY(during_gc) &&
7351 ROBJ_TRANSIENT_P(obj)) {
7352 rb_transient_heap_mark(obj, ptr);
7359 uint32_t num_of_ivs = shape->next_iv_index;
7360 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
7361 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
      case T_FILE:
        if (any->as.file.fptr) {
            gc_mark(objspace, any->as.file.fptr->self);
            gc_mark(objspace, any->as.file.fptr->pathv);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, any->as.regexp.src);
        break;

      case T_MATCH:
        gc_mark(objspace, any->as.match.regexp);
        if (any->as.match.str) {
            gc_mark(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, any->as.rational.num);
        gc_mark(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        gc_mark(objspace, any->as.complex.real);
        gc_mark(objspace, any->as.complex.imag);
        break;

      case T_STRUCT:
        {
            const long len = RSTRUCT_LEN(obj);
            const VALUE *const ptr = RSTRUCT_CONST_PTR(obj);

            for (i=0; i<len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (LIKELY(during_gc) &&
                RSTRUCT_TRANSIENT_P(obj)) {
                rb_transient_heap_mark(obj, ptr);
            }
        }
        break;

      default:
        rb_gcdebug_print_obj_condition((VALUE)obj);
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)any,
               is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
static int
gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj;
#if GC_ENABLE_INCREMENTAL_MARK
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;
#endif

    while (pop_mark_stack(mstack, &obj)) {
        if (UNDEF_P(obj)) continue; /* skip */

        if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
            rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
        }
        gc_mark_children(objspace, obj);

#if GC_ENABLE_INCREMENTAL_MARK
        if (incremental) {
            if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
            }
            CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
            popped_count++;

            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
                break;
            }
        }
#endif
    }

    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static int
gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
{
    return gc_mark_stacked_objects(objspace, TRUE, count);
}

static int
gc_mark_stacked_objects_all(rb_objspace_t *objspace)
{
    return gc_mark_stacked_objects(objspace, FALSE, 0);
}
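/*
 * Incremental marking drains the mark stack in bounded slices: the loop above
 * stops once roughly `count` objects have been blackened, while
 * gc_mark_stacked_objects_all() pops until the stack is empty.  A hedged,
 * generic sketch of a budgeted drain (not this file's API):
 *
 *     size_t done = 0;
 *     while (done < budget && pop_grey(&stack, &obj)) {
 *         trace_children(obj);   // may push more grey objects
 *         done++;
 *     }
 *     return stack_is_empty(&stack);
 */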
7495#define MAX_TICKS 0x100
7496static tick_t mark_ticks[MAX_TICKS];
7497static const char *mark_ticks_categories[MAX_TICKS];
7500show_mark_ticks(
void)
7503 fprintf(stderr,
"mark ticks result:\n");
7504 for (i=0; i<MAX_TICKS; i++) {
7505 const char *category = mark_ticks_categories[i];
7507 fprintf(stderr,
"%s\t%8lu\n", category, (
unsigned long)mark_ticks[i]);
7518gc_mark_roots(
rb_objspace_t *objspace,
const char **categoryp)
7522 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7525 tick_t start_tick = tick();
7527 const char *prev_category = 0;
7529 if (mark_ticks_categories[0] == 0) {
7530 atexit(show_mark_ticks);
7534 if (categoryp) *categoryp =
"xxx";
7536 objspace->rgengc.parent_object =
Qfalse;
7539#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7540 if (prev_category) { \
7541 tick_t t = tick(); \
7542 mark_ticks[tick_count] = t - start_tick; \
7543 mark_ticks_categories[tick_count] = prev_category; \
7546 prev_category = category; \
7547 start_tick = tick(); \
7550#define MARK_CHECKPOINT_PRINT_TICK(category)
7553#define MARK_CHECKPOINT(category) do { \
7554 if (categoryp) *categoryp = category; \
7555 MARK_CHECKPOINT_PRINT_TICK(category); \
} while (0)

    MARK_CHECKPOINT("vm");
    rb_vm_mark(vm);
    if (vm->self) gc_mark(objspace, vm->self);

    MARK_CHECKPOINT("finalizers");
    mark_finalizer_tbl(objspace, finalizer_table);

    MARK_CHECKPOINT("machine_context");
    mark_current_machine_context(objspace, ec);

    MARK_CHECKPOINT("global_list");
    for (list = global_list; list; list = list->next) {
        gc_mark_maybe(objspace, *list->varptr);
    }

    MARK_CHECKPOINT("end_proc");
    rb_mark_end_proc();

    MARK_CHECKPOINT("global_tbl");
    rb_gc_mark_global_tbl();

    MARK_CHECKPOINT("object_id");
    rb_gc_mark(objspace->next_object_id);
    mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */

    if (stress_to_class) rb_gc_mark(stress_to_class);

    MARK_CHECKPOINT("finish");
#undef MARK_CHECKPOINT
}
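/*
 * The root set marked above is: the VM itself, the finalizer table, the
 * current machine stack and registers, addresses registered through
 * rb_gc_register_address() (global_list), end procs, the global variable
 * table, and the object-id tables.  Everything else is reached transitively
 * from these roots.
 */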
7591#if RGENGC_CHECK_MODE >= 4
7593#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7594#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7595#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7603static struct reflist *
7604reflist_create(
VALUE obj)
7606 struct reflist *refs =
xmalloc(
sizeof(
struct reflist));
7609 refs->list[0] = obj;
7615reflist_destruct(
struct reflist *refs)
7622reflist_add(
struct reflist *refs,
VALUE obj)
7624 if (refs->pos == refs->size) {
7626 SIZED_REALLOC_N(refs->list,
VALUE, refs->size, refs->size/2);
7629 refs->list[refs->pos++] = obj;
7633reflist_dump(
struct reflist *refs)
7636 for (i=0; i<refs->pos; i++) {
7637 VALUE obj = refs->list[i];
7638 if (IS_ROOTSIG(obj)) {
7639 fprintf(stderr,
"<root@%s>", GET_ROOTSIG(obj));
7642 fprintf(stderr,
"<%s>", obj_info(obj));
7644 if (i+1 < refs->pos) fprintf(stderr,
", ");
7649reflist_referred_from_machine_context(
struct reflist *refs)
7652 for (i=0; i<refs->pos; i++) {
7653 VALUE obj = refs->list[i];
7654 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj),
"machine_context") == 0)
return 1;
7669 const char *category;
7675allrefs_add(
struct allrefs *data,
VALUE obj)
7677 struct reflist *refs;
7680 if (st_lookup(data->references, obj, &r)) {
7681 refs = (
struct reflist *)r;
7682 reflist_add(refs, data->root_obj);
7686 refs = reflist_create(data->root_obj);
7687 st_insert(data->references, obj, (st_data_t)refs);
7693allrefs_i(
VALUE obj,
void *ptr)
7695 struct allrefs *data = (
struct allrefs *)ptr;
7697 if (allrefs_add(data, obj)) {
7698 push_mark_stack(&data->mark_stack, obj);
7703allrefs_roots_i(
VALUE obj,
void *ptr)
7705 struct allrefs *data = (
struct allrefs *)ptr;
7706 if (strlen(data->category) == 0)
rb_bug(
"!!!");
7707 data->root_obj = MAKE_ROOTSIG(data->category);
7709 if (allrefs_add(data, obj)) {
7710 push_mark_stack(&data->mark_stack, obj);
7713#define PUSH_MARK_FUNC_DATA(v) do { \
7714 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7715 GET_RACTOR()->mfd = (v);
7717#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7722 struct allrefs data;
7723 struct gc_mark_func_data_struct mfd;
7725 int prev_dont_gc = dont_gc_val();
7728 data.objspace = objspace;
7729 data.references = st_init_numtable();
7730 init_mark_stack(&data.mark_stack);
7732 mfd.mark_func = allrefs_roots_i;
7736 PUSH_MARK_FUNC_DATA(&mfd);
7737 GET_RACTOR()->mfd = &mfd;
7738 gc_mark_roots(objspace, &data.category);
7739 POP_MARK_FUNC_DATA();
7742 while (pop_mark_stack(&data.mark_stack, &obj)) {
7743 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7745 free_stack_chunks(&data.mark_stack);
7747 dont_gc_set(prev_dont_gc);
7748 return data.references;
7752objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7754 struct reflist *refs = (
struct reflist *)value;
7755 reflist_destruct(refs);
7760objspace_allrefs_destruct(
struct st_table *refs)
7762 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7763 st_free_table(refs);
7766#if RGENGC_CHECK_MODE >= 5
7768allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7771 struct reflist *refs = (
struct reflist *)v;
7772 fprintf(stderr,
"[allrefs_dump_i] %s <- ", obj_info(obj));
7774 fprintf(stderr,
"\n");
7781 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7782 fprintf(stderr,
"[all refs] (size: %"PRIuVALUE
")\n", size);
7783 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7788gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7791 struct reflist *refs = (
struct reflist *)v;
7795 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7796 fprintf(stderr,
"gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7797 fprintf(stderr,
"gc_check_after_marks_i: %p is referred from ", (
void *)obj);
7800 if (reflist_referred_from_machine_context(refs)) {
7801 fprintf(stderr,
" (marked from machine stack).\n");
7805 objspace->rgengc.error_count++;
7806 fprintf(stderr,
"\n");
7813gc_marks_check(
rb_objspace_t *objspace, st_foreach_callback_func *checker_func,
const char *checker_name)
7815 size_t saved_malloc_increase = objspace->malloc_params.increase;
7816#if RGENGC_ESTIMATE_OLDMALLOC
7817 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7819 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7821 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7824 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7827 if (objspace->rgengc.error_count > 0) {
7828#if RGENGC_CHECK_MODE >= 5
7829 allrefs_dump(objspace);
7831 if (checker_name)
rb_bug(
"%s: GC has problem.", checker_name);
7834 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7835 objspace->rgengc.allrefs_table = 0;
7837 if (already_disabled ==
Qfalse) rb_objspace_gc_enable(objspace);
7838 objspace->malloc_params.increase = saved_malloc_increase;
7839#if RGENGC_ESTIMATE_OLDMALLOC
7840 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7848 size_t live_object_count;
7849 size_t zombie_object_count;
7852 size_t old_object_count;
7853 size_t remembered_shady_count;
7857check_generation_i(
const VALUE child,
void *ptr)
7860 const VALUE parent = data->parent;
7862 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7864 if (!RVALUE_OLD_P(child)) {
7865 if (!RVALUE_REMEMBERED(parent) &&
7866 !RVALUE_REMEMBERED(child) &&
7867 !RVALUE_UNCOLLECTIBLE(child)) {
7868 fprintf(stderr,
"verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7875check_color_i(
const VALUE child,
void *ptr)
7878 const VALUE parent = data->parent;
7880 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7881 fprintf(stderr,
"verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7882 obj_info(parent), obj_info(child));
7888check_children_i(
const VALUE child,
void *ptr)
7891 if (check_rvalue_consistency_force(child, FALSE) != 0) {
7892 fprintf(stderr,
"check_children_i: %s has error (referenced from %s)",
7893 obj_info(child), obj_info(data->parent));
7894 rb_print_backtrace();
7901verify_internal_consistency_i(
void *page_start,
void *page_end,
size_t stride,
7907 for (obj = (
VALUE)page_start; obj != (
VALUE)page_end; obj += stride) {
7908 void *poisoned = asan_unpoison_object_temporary(obj);
7910 if (is_live_object(objspace, obj)) {
7912 data->live_object_count++;
7917 if (!gc_object_moved_p(objspace, obj)) {
7919 rb_objspace_reachable_objects_from(obj, check_children_i, (
void *)data);
7923 if (RVALUE_OLD_P(obj)) data->old_object_count++;
7924 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
7926 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
7929 rb_objspace_reachable_objects_from(obj, check_generation_i, (
void *)data);
7932 if (is_incremental_marking(objspace)) {
7933 if (RVALUE_BLACK_P(obj)) {
7936 rb_objspace_reachable_objects_from(obj, check_color_i, (
void *)data);
7943 data->zombie_object_count++;
7948 asan_poison_object(obj);
7958 unsigned int has_remembered_shady = FALSE;
7959 unsigned int has_remembered_old = FALSE;
7960 int remembered_old_objects = 0;
7961 int free_objects = 0;
7962 int zombie_objects = 0;
7964 short slot_size = page->slot_size;
7965 uintptr_t start = (uintptr_t)page->start;
7966 uintptr_t end = start + page->total_slots * slot_size;
7968 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
7970 void *poisoned = asan_unpoison_object_temporary(val);
7975 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
7976 has_remembered_shady = TRUE;
7978 if (RVALUE_PAGE_MARKING(page, val)) {
7979 has_remembered_old = TRUE;
7980 remembered_old_objects++;
7985 asan_poison_object(val);
7989 if (!is_incremental_marking(objspace) &&
7990 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
7992 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
7994 if (RVALUE_PAGE_MARKING(page, val)) {
7995 fprintf(stderr,
"marking -> %s\n", obj_info(val));
7998 rb_bug(
"page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7999 (
void *)page, remembered_old_objects, obj ? obj_info(obj) :
"");
8002 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
8003 rb_bug(
"page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
8004 (
void *)page, obj ? obj_info(obj) :
"");
8009 if (page->free_slots != free_objects) {
8010 rb_bug(
"page %p's free_slots should be %d, but %d\n", (
void *)page, page->free_slots, free_objects);
8013 if (page->final_slots != zombie_objects) {
8014 rb_bug(
"page %p's final_slots should be %d, but %d\n", (
void *)page, page->final_slots, zombie_objects);
8017 return remembered_old_objects;
8021gc_verify_heap_pages_(
rb_objspace_t *objspace,
struct ccan_list_head *head)
8023 int remembered_old_objects = 0;
8026 ccan_list_for_each(head, page, page_node) {
8027 asan_unlock_freelist(page);
8028 RVALUE *p = page->freelist;
8032 asan_unpoison_object(vp,
false);
8034 fprintf(stderr,
"freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
8036 p = p->as.free.next;
8037 asan_poison_object(prev);
8039 asan_lock_freelist(page);
8041 if (page->flags.has_remembered_objects == FALSE) {
8042 remembered_old_objects += gc_verify_heap_page(objspace, page,
Qfalse);
8046 return remembered_old_objects;
8052 int remembered_old_objects = 0;
8053 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8054 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
8055 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
8057 return remembered_old_objects;
8071gc_verify_internal_consistency_m(
VALUE dummy)
8082 data.objspace = objspace;
8083 gc_report(5, objspace,
"gc_verify_internal_consistency: start\n");
8086 for (
size_t i = 0; i < heap_allocated_pages; i++) {
8087 struct heap_page *page = heap_pages_sorted[i];
8088 short slot_size = page->slot_size;
8090 uintptr_t start = (uintptr_t)page->start;
8091 uintptr_t end = start + page->total_slots * slot_size;
8093 verify_internal_consistency_i((
void *)start, (
void *)end, slot_size, &data);
8096 if (data.err_count != 0) {
8097#if RGENGC_CHECK_MODE >= 5
8098 objspace->rgengc.error_count = data.err_count;
8099 gc_marks_check(objspace, NULL, NULL);
8100 allrefs_dump(objspace);
8102 rb_bug(
"gc_verify_internal_consistency: found internal inconsistency.");
8106 gc_verify_heap_pages(objspace);
8110 if (!is_lazy_sweeping(objspace) &&
8112 ruby_single_main_ractor != NULL) {
8113 if (objspace_live_slots(objspace) != data.live_object_count) {
8114 fprintf(stderr,
"heap_pages_final_slots: %"PRIdSIZE
", "
8115 "objspace->profile.total_freed_objects: %"PRIdSIZE
"\n",
8116 heap_pages_final_slots, objspace->profile.total_freed_objects);
8117 rb_bug(
"inconsistent live slot number: expect %"PRIuSIZE
", but %"PRIuSIZE
".",
8118 objspace_live_slots(objspace), data.live_object_count);
8122 if (!is_marking(objspace)) {
8123 if (objspace->rgengc.old_objects != data.old_object_count) {
8124 rb_bug(
"inconsistent old slot number: expect %"PRIuSIZE
", but %"PRIuSIZE
".",
8125 objspace->rgengc.old_objects, data.old_object_count);
8127 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
8128 rb_bug(
"inconsistent number of wb unprotected objects: expect %"PRIuSIZE
", but %"PRIuSIZE
".",
8129 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
8134 size_t list_count = 0;
8137 VALUE z = heap_pages_deferred_final;
8140 z = RZOMBIE(z)->next;
8144 if (heap_pages_final_slots != data.zombie_object_count ||
8145 heap_pages_final_slots != list_count) {
8147 rb_bug(
"inconsistent finalizing object count:\n"
8148 " expect %"PRIuSIZE
"\n"
8149 " but %"PRIuSIZE
" zombies\n"
8150 " heap_pages_deferred_final list has %"PRIuSIZE
" items.",
8151 heap_pages_final_slots,
8152 data.zombie_object_count,
8157 gc_report(5, objspace,
"gc_verify_internal_consistency: OK\n");
8167 unsigned int prev_during_gc = during_gc;
8170 gc_verify_internal_consistency_(objspace);
8172 during_gc = prev_during_gc;
8178rb_gc_verify_internal_consistency(
void)
8184gc_verify_transient_heap_internal_consistency(
VALUE dmy)
8186 rb_transient_heap_verify();
8190#if GC_ENABLE_INCREMENTAL_MARK
8192heap_move_pooled_pages_to_free_pages(
rb_heap_t *heap)
8194 if (heap->pooled_pages) {
8195 if (heap->free_pages) {
8196 struct heap_page *free_pages_tail = heap->free_pages;
8197 while (free_pages_tail->free_next) {
8198 free_pages_tail = free_pages_tail->free_next;
8200 free_pages_tail->free_next = heap->pooled_pages;
8203 heap->free_pages = heap->pooled_pages;
8206 heap->pooled_pages = NULL;
8217 gc_report(1, objspace,
"gc_marks_start: (%s)\n", full_mark ?
"full" :
"minor");
8218 gc_mode_transition(objspace, gc_mode_marking);
8221#if GC_ENABLE_INCREMENTAL_MARK
8222 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
8223 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
8225 if (0) fprintf(stderr,
"objspace->marked_slots: %"PRIdSIZE
", "
8226 "objspace->rincgc.pooled_page_num: %"PRIdSIZE
", "
8227 "objspace->rincgc.step_slots: %"PRIdSIZE
", \n",
8228 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
8230 objspace->flags.during_minor_gc = FALSE;
8231 if (ruby_enable_autocompact) {
8232 objspace->flags.during_compacting |= TRUE;
8234 objspace->profile.major_gc_count++;
8235 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
8236 objspace->rgengc.old_objects = 0;
8237 objspace->rgengc.last_major_gc = objspace->profile.count;
8238 objspace->marked_slots = 0;
8240 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8242 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8243 rgengc_mark_and_rememberset_clear(objspace, heap);
8244 heap_move_pooled_pages_to_free_pages(heap);
8248 objspace->flags.during_minor_gc = TRUE;
8249 objspace->marked_slots =
8250 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects;
8251 objspace->profile.minor_gc_count++;
8253 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8254 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8258 gc_mark_roots(objspace, NULL);
8260 gc_report(1, objspace,
"gc_marks_start: (%s) end, stack in %"PRIdSIZE
"\n",
8261 full_mark ?
"full" :
"minor", mark_stack_size(&objspace->
mark_stack));
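/*
 * For a full mark, the number of incremental steps is derived from the pooled
 * slot count and rincgc.step_slots is sized so marking finishes within those
 * steps.  For example, assuming INCREMENTAL_MARK_STEP_ALLOCATIONS is at its
 * default of 500, 2048 pooled slots give (2048 / 500) + 1 = 5 steps, so
 * step_slots becomes (marked_slots * 2) / 5.
 */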
8264#if GC_ENABLE_INCREMENTAL_MARK
8266gc_marks_wb_unprotected_objects_plane(
rb_objspace_t *objspace, uintptr_t p, bits_t bits)
8271 gc_report(2, objspace,
"gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((
VALUE)p));
8272 GC_ASSERT(RVALUE_WB_UNPROTECTED((
VALUE)p));
8273 GC_ASSERT(RVALUE_MARKED((
VALUE)p));
8274 gc_mark_children(objspace, (
VALUE)p);
8276 p += BASE_SLOT_SIZE;
8287 ccan_list_for_each(&heap->pages, page, page_node) {
8288 bits_t *mark_bits = page->mark_bits;
8289 bits_t *wbun_bits = page->wb_unprotected_bits;
8290 uintptr_t p = page->start;
8293 bits_t bits = mark_bits[0] & wbun_bits[0];
8294 bits >>= NUM_IN_PAGE(p);
8295 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8296 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8298 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8299 bits_t bits = mark_bits[j] & wbun_bits[j];
8301 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8302 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8306 gc_mark_stacked_objects_all(objspace);
8313#if GC_ENABLE_INCREMENTAL_MARK
8315 if (is_incremental_marking(objspace)) {
8316 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
8317 rb_bug(
"gc_marks_finish: mark stack is not empty (%"PRIdSIZE
").",
8318 mark_stack_size(&objspace->mark_stack));
8321 gc_mark_roots(objspace, 0);
8322 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) ==
false);
8324#if RGENGC_CHECK_MODE >= 2
8325 if (gc_verify_heap_pages(objspace) != 0) {
8326 rb_bug(
"gc_marks_finish (incremental): there are remembered old objects.");
8330 objspace->flags.during_incremental_marking = FALSE;
8332 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8333 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8338#if RGENGC_CHECK_MODE >= 2
8339 gc_verify_internal_consistency(objspace);
8342 if (is_full_marking(objspace)) {
8344 const double r = gc_params.oldobject_limit_factor;
8345 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8346 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8349#if RGENGC_CHECK_MODE >= 4
8351 gc_marks_check(objspace, gc_check_after_marks_i,
"after_marks");
8357 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
8358 size_t sweep_slots = total_slots - objspace->marked_slots;
8359 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
8360 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8361 int full_marking = is_full_marking(objspace);
8362 const int r_cnt = GET_VM()->ractor.cnt;
8363 const int r_mul = r_cnt > 8 ? 8 : r_cnt;
8365 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8368 if (max_free_slots < gc_params.heap_init_slots * r_mul) {
8369 max_free_slots = gc_params.heap_init_slots * r_mul;
8372 if (sweep_slots > max_free_slots) {
8373 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8376 heap_pages_freeable_pages = 0;
8380 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8381 min_free_slots = gc_params.heap_free_slots * r_mul;
8384 if (sweep_slots < min_free_slots) {
8385 if (!full_marking) {
8386 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8387 full_marking = TRUE;
8392 gc_report(1, objspace,
"gc_marks_finish: next is full GC!!)\n");
8393 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8400 gc_report(1, objspace,
"gc_marks_finish: heap_set_increment!!\n");
8402 size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, size_pool, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
8404 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8411 const double r = gc_params.oldobject_limit_factor;
8412 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8413 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8416 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8417 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8419 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8420 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8422 if (RGENGC_FORCE_MAJOR_GC) {
8423 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8426 gc_report(1, objspace,
"gc_marks_finish (marks %"PRIdSIZE
" objects, "
8427 "old %"PRIdSIZE
" objects, total %"PRIdSIZE
" slots, "
8428 "sweep %"PRIdSIZE
" slots, increment: %"PRIdSIZE
", next GC: %s)\n",
8429 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8430 objspace->rgengc.need_major_gc ?
"major" :
"minor");
8433 rb_transient_heap_finish_marking();
8434 rb_ractor_finish_marking();
8439#if GC_ENABLE_INCREMENTAL_MARK
8443 GC_ASSERT(is_marking(objspace));
8445 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8446 gc_marks_finish(objspace);
8449 if (0) fprintf(stderr,
"objspace->marked_slots: %"PRIdSIZE
"\n", objspace->marked_slots);
8454gc_compact_heap_cursors_met_p(
rb_heap_t *heap)
8456 return heap->sweeping_page == heap->compact_cursor;
8467 obj_size = rb_ary_size_as_embedded(src);
8471 if (rb_shape_obj_too_complex(src)) {
8472 return &size_pools[0];
8475 obj_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(src));
8480 obj_size = rb_str_size_as_embedded(src);
8487 if (rb_gc_size_allocatable_p(obj_size)){
8488 idx = size_pool_idx_for_size(obj_size);
8490 return &size_pools[idx];
8497 GC_ASSERT(gc_is_moveable_obj(objspace, src));
8499 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
8500 rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
8504 if (gc_compact_heap_cursors_met_p(dheap)) {
8505 return dheap != heap;
8509 orig_shape = rb_shape_get_shape(src);
8510 if (dheap != heap && !rb_shape_obj_too_complex(src)) {
8511 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
8512 new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
8515 dest_pool = size_pool;
8521 while (!try_move(objspace, dheap, dheap->free_pages, src)) {
8523 .page = dheap->sweeping_page,
8532 lock_page_body(objspace, GET_PAGE_BODY(src));
8533 gc_sweep_page(objspace, dheap, &ctx);
8534 unlock_page_body(objspace, GET_PAGE_BODY(src));
8536 if (dheap->sweeping_page->free_slots > 0) {
8537 heap_add_freepage(dheap, dheap->sweeping_page);
8540 dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
8541 if (gc_compact_heap_cursors_met_p(dheap)) {
8542 return dheap != heap;
8548 VALUE dest = rb_gc_location(src);
8549 rb_shape_set_shape(dest, new_shape);
8551 RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
8560 short slot_size = page->slot_size;
8561 short slot_bits = slot_size / BASE_SLOT_SIZE;
8562 GC_ASSERT(slot_bits > 0);
8566 GC_ASSERT(vp %
sizeof(
RVALUE) == 0);
8569 objspace->rcompactor.considered_count_table[
BUILTIN_TYPE(vp)]++;
8571 if (gc_is_moveable_obj(objspace, vp)) {
8572 if (!gc_compact_move(objspace, heap, size_pool, vp)) {
8579 bitset >>= slot_bits;
8589 GC_ASSERT(page == heap->compact_cursor);
8591 bits_t *mark_bits, *pin_bits;
8593 uintptr_t p = page->start;
8595 mark_bits = page->mark_bits;
8596 pin_bits = page->pinned_bits;
8599 bitset = (mark_bits[0] & ~pin_bits[0]);
8600 bitset >>= NUM_IN_PAGE(p);
8602 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8605 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8607 for (
int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8608 bitset = (mark_bits[j] & ~pin_bits[j]);
8610 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8613 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8622 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8624 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8626 if (heap->total_pages > 0 &&
8627 !gc_compact_heap_cursors_met_p(heap)) {
8638 gc_compact_start(objspace);
8639#if RGENGC_CHECK_MODE >= 2
8640 gc_verify_internal_consistency(objspace);
8643 while (!gc_compact_all_compacted_p(objspace)) {
8644 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8646 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8648 if (gc_compact_heap_cursors_met_p(heap)) {
8652 struct heap_page *start_page = heap->compact_cursor;
8654 if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
8655 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8662 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8663 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
8667 gc_compact_finish(objspace);
8669#if RGENGC_CHECK_MODE >= 2
8670 gc_verify_internal_consistency(objspace);
8677 gc_report(1, objspace,
"gc_marks_rest\n");
8679#if GC_ENABLE_INCREMENTAL_MARK
8680 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8681 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8685 if (is_incremental_marking(objspace)) {
8686 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8689 gc_mark_stacked_objects_all(objspace);
8692 gc_marks_finish(objspace);
8701 GC_ASSERT(dont_gc_val() == FALSE);
8702#if GC_ENABLE_INCREMENTAL_MARK
8704 unsigned int lock_lev;
8705 gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
8707 if (heap->free_pages) {
8708 gc_report(2, objspace,
"gc_marks_continue: has pooled pages");
8709 gc_marks_step(objspace, objspace->rincgc.step_slots);
8712 gc_report(2, objspace,
"gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE
").\n",
8713 mark_stack_size(&objspace->mark_stack));
8714 gc_marks_rest(objspace);
8717 gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
8724 gc_prof_mark_timer_start(objspace);
8728 gc_marks_start(objspace, full_mark);
8729 if (!is_incremental_marking(objspace)) {
8730 gc_marks_rest(objspace);
8733#if RGENGC_PROFILE > 0
8734 if (gc_prof_record(objspace)) {
8736 record->old_objects = objspace->rgengc.old_objects;
8739 gc_prof_mark_timer_stop(objspace);
8745gc_report_body(
int level,
rb_objspace_t *objspace,
const char *fmt, ...)
8747 if (level <= RGENGC_DEBUG) {
8751 const char *status =
" ";
8754 status = is_full_marking(objspace) ?
"+" :
"-";
8757 if (is_lazy_sweeping(objspace)) {
8760 if (is_incremental_marking(objspace)) {
8765 va_start(args, fmt);
8766 vsnprintf(buf, 1024, fmt, args);
8769 fprintf(out,
"%s|", status);
8779 return RVALUE_REMEMBERED(obj);
8785 struct heap_page *page = GET_HEAP_PAGE(obj);
8786 bits_t *bits = &page->marking_bits[0];
8788 GC_ASSERT(!is_incremental_marking(objspace));
8790 if (MARKED_IN_BITMAP(bits, obj)) {
8794 page->flags.has_remembered_objects = TRUE;
8795 MARK_IN_BITMAP(bits, obj);
8806 gc_report(6, objspace,
"rgengc_remember: %s %s\n", obj_info(obj),
8807 rgengc_remembersetbits_get(objspace, obj) ?
"was already remembered" :
"is remembered now");
8809 check_rvalue_consistency(obj);
8811 if (RGENGC_CHECK_MODE) {
8812 if (RVALUE_WB_UNPROTECTED(obj))
rb_bug(
"rgengc_remember: %s is not wb protected.", obj_info(obj));
8815#if RGENGC_PROFILE > 0
8816 if (!rgengc_remembered(objspace, obj)) {
8817 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8818 objspace->profile.total_remembered_normal_object_count++;
8819#if RGENGC_PROFILE >= 2
8820 objspace->profile.remembered_normal_object_count_types[
BUILTIN_TYPE(obj)]++;
8826 return rgengc_remembersetbits_set(objspace, obj);
8832 int result = rgengc_remembersetbits_get(objspace, obj);
8833 check_rvalue_consistency(obj);
8840 gc_report(6, objspace,
"rgengc_remembered: %s\n", obj_info(obj));
8841 return rgengc_remembered_sweep(objspace, obj);
8844#ifndef PROFILE_REMEMBERSET_MARK
8845#define PROFILE_REMEMBERSET_MARK 0
8849rgengc_rememberset_mark_plane(
rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8855 gc_report(2, objspace,
"rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8856 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8857 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8859 gc_mark_children(objspace, obj);
8861 p += BASE_SLOT_SIZE;
8872#if PROFILE_REMEMBERSET_MARK
8873 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8875 gc_report(1, objspace,
"rgengc_rememberset_mark: start\n");
8877 ccan_list_for_each(&heap->pages, page, page_node) {
8878 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
8879 uintptr_t p = page->start;
8880 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8881 bits_t *marking_bits = page->marking_bits;
8882 bits_t *uncollectible_bits = page->uncollectible_bits;
8883 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8884#if PROFILE_REMEMBERSET_MARK
8885 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
8886 else if (page->flags.has_remembered_objects) has_old++;
8887 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
8889 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8890 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8891 marking_bits[j] = 0;
8893 page->flags.has_remembered_objects = FALSE;
8896 bitset >>= NUM_IN_PAGE(p);
8897 rgengc_rememberset_mark_plane(objspace, p, bitset);
8898 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8900 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8902 rgengc_rememberset_mark_plane(objspace, p, bitset);
8903 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8906#if PROFILE_REMEMBERSET_MARK
8913#if PROFILE_REMEMBERSET_MARK
8914 fprintf(stderr,
"%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
8916 gc_report(1, objspace,
"rgengc_rememberset_mark: finished\n");
8924 ccan_list_for_each(&heap->pages, page, page_node) {
8925 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8926 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8927 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8928 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8929 page->flags.has_uncollectible_shady_objects = FALSE;
8930 page->flags.has_remembered_objects = FALSE;
    if (RGENGC_CHECK_MODE) {
        if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
        if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
        if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
    }

#if 1
    /* mark `a' and remember (default behavior) */
    if (!rgengc_remembered(objspace, a)) {
        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            rgengc_remember(objspace, a);
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();
        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
    }
#else
    /* mark `b' and remember */
    MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
    if (RVALUE_WB_UNPROTECTED(b)) {
        gc_remember_unprotected(objspace, b);
    }
    else {
        RVALUE_AGE_SET_OLD(objspace, b);
        rgengc_remember(objspace, b);
    }

    gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
#endif

    check_rvalue_consistency(a);
    check_rvalue_consistency(b);
}
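/*
 * This is the generational write barrier: outside incremental marking, a
 * store that makes an old object `a` point at a young object `b` adds `a` to
 * the remembered set so the next minor GC re-scans it.  A hedged, generic
 * sketch of the idea (not this file's API):
 *
 *     // conceptual barrier for every `parent->field = child` store
 *     if (is_old(parent) && !is_old(child) && !remembered(parent)) {
 *         remembered_set_add(parent);   // minor GC will re-trace parent
 *     }
 */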
#if GC_ENABLE_INCREMENTAL_MARK
static void
gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
{
    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
}

static void
gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));

    if (RVALUE_BLACK_P(a)) {
        if (RVALUE_WHITE_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
                gc_mark_from(objspace, b, a);
            }
        }
        else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(b)) {
                gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
                RVALUE_AGE_SET_OLD(objspace, b);

                if (RVALUE_BLACK_P(b)) {
                    gc_grey(objspace, b);
                }
            }
            else {
                gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
                gc_remember_unprotected(objspace, b);
            }
        }

        if (UNLIKELY(objspace->flags.during_compacting)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
        }
    }
}
#else
#define gc_writebarrier_incremental(a, b, objspace)
#endif

void
rb_gc_writebarrier(VALUE a, VALUE b)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (RGENGC_CHECK_MODE) {
        if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
        if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
    }

  retry:
    if (!is_incremental_marking(objspace)) {
        if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
            /* nothing to do */
        }
        else {
            gc_writebarrier_generational(a, b, objspace);
        }
    }
    else {
        bool retry = false;
        /* slow path */
        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            if (is_incremental_marking(objspace)) {
                gc_writebarrier_incremental(a, b, objspace);
            }
            else {
                retry = true;
            }
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();

        if (retry) goto retry;
    }
    return;
}
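/*
 * During incremental marking the barrier acts as an incremental-update
 * barrier: a store from a black `a` into a white `b` re-marks `b`
 * (gc_mark_from), and an old->young store either promotes `b` or records it,
 * so objects reachable only through already-scanned parents cannot be missed
 * before the mark phase ends.
 */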
9065 if (RVALUE_WB_UNPROTECTED(obj)) {
9071 gc_report(2, objspace,
"rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
9072 rgengc_remembered(objspace, obj) ?
" (already remembered)" :
"");
9074 RB_VM_LOCK_ENTER_NO_BARRIER();
9076 if (RVALUE_OLD_P(obj)) {
9077 gc_report(1, objspace,
"rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
9078 RVALUE_DEMOTE(objspace, obj);
9079 gc_mark_set(objspace, obj);
9080 gc_remember_unprotected(objspace, obj);
9083 objspace->profile.total_shade_operation_count++;
9084#if RGENGC_PROFILE >= 2
9085 objspace->profile.shade_operation_count_types[
BUILTIN_TYPE(obj)]++;
9090 RVALUE_AGE_RESET(obj);
9093 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
9094 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
9096 RB_VM_LOCK_LEAVE_NO_BARRIER();
MJIT_FUNC_EXPORTED void
rb_gc_writebarrier_remember(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));

    if (is_incremental_marking(objspace)) {
        if (RVALUE_BLACK_P(obj)) {
            gc_grey(objspace, obj);
        }
    }
    else {
        if (RVALUE_OLD_P(obj)) {
            rgengc_remember(objspace, obj);
        }
    }
}
9122static st_table *rgengc_unprotect_logging_table;
9125rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
9127 fprintf(stderr,
"%s\t%"PRIuVALUE
"\n", (
char *)key, (
VALUE)val);
9132rgengc_unprotect_logging_exit_func(
void)
9134 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
9138rb_gc_unprotect_logging(
void *objptr,
const char *filename,
int line)
9142 if (rgengc_unprotect_logging_table == 0) {
9143 rgengc_unprotect_logging_table = st_init_strtable();
9144 atexit(rgengc_unprotect_logging_exit_func);
9147 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
9152 snprintf(ptr, 0x100 - 1,
"%s|%s:%d", obj_info(obj), filename, line);
9154 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
9159 if (!ptr) rb_memerror();
9161 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
9166rb_copy_wb_protected_attribute(
VALUE dest,
VALUE obj)
9170 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
9171 if (!RVALUE_OLD_P(dest)) {
9172 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
9173 RVALUE_AGE_RESET_RAW(dest);
9176 RVALUE_DEMOTE(objspace, dest);
9180 check_rvalue_consistency(dest);
9186rb_obj_rgengc_writebarrier_protected_p(
VALUE obj)
9188 return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
9192rb_obj_rgengc_promoted_p(
VALUE obj)
9198rb_obj_gc_flags(
VALUE obj,
ID* flags,
size_t max)
9201 static ID ID_marked;
9202 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
9205#define I(s) ID_##s = rb_intern(#s);
9215 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
9216 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
9217 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
9218 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
9219 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
9220 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
9229#if GC_ENABLE_INCREMENTAL_MARK
9230 newobj_cache->incremental_mark_step_allocated_slots = 0;
9233 for (
size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
9236 struct heap_page *page = cache->using_page;
9237 RVALUE *freelist = cache->freelist;
9238 RUBY_DEBUG_LOG(
"ractor using_page:%p freelist:%p", (
void *)page, (
void *)freelist);
9240 heap_page_freelist_append(page, freelist);
9242 cache->using_page = NULL;
9243 cache->freelist = NULL;
9253#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
9254#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
9260 if (!is_pointer_to_heap(&
rb_objspace, (
void *)obj))
9265 VALUE ary_ary = GET_VM()->mark_object_ary;
9266 VALUE ary = rb_ary_last(0, 0, ary_ary);
9269 ary = rb_ary_hidden_new(MARK_OBJECT_ARY_BUCKET_SIZE);
9270 rb_ary_push(ary_ary, ary);
9273 rb_ary_push(ary, obj);
9285 tmp->next = global_list;
9294 struct gc_list *tmp = global_list;
9296 if (tmp->varptr == addr) {
9297 global_list = tmp->next;
9302 if (tmp->next->varptr == addr) {
9303 struct gc_list *t = tmp->next;
9305 tmp->next = tmp->next->next;
9316 rb_gc_register_address(var);
9323 gc_stress_no_immediate_sweep,
9324 gc_stress_full_mark_after_malloc,
9328#define gc_stress_full_mark_after_malloc_p() \
9329 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
9334 if (!heap->free_pages) {
9335 if (!heap_increment(objspace, size_pool, heap)) {
9336 size_pool_allocatable_pages_set(objspace, size_pool, 1);
9337 heap_increment(objspace, size_pool, heap);
9345 if (dont_gc_val() || during_gc || ruby_disable_gc) {
9346 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
9348 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
9358gc_reset_malloc_info(
rb_objspace_t *objspace,
bool full_mark)
9360 gc_prof_set_malloc_info(objspace);
9362 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
9363 size_t old_limit = malloc_limit;
9365 if (inc > malloc_limit) {
9366 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
9367 if (malloc_limit > gc_params.malloc_limit_max) {
9368 malloc_limit = gc_params.malloc_limit_max;
9372 malloc_limit = (size_t)(malloc_limit * 0.98);
9373 if (malloc_limit < gc_params.malloc_limit_min) {
9374 malloc_limit = gc_params.malloc_limit_min;
9379 if (old_limit != malloc_limit) {
9380 fprintf(stderr,
"[%"PRIuSIZE
"] malloc_limit: %"PRIuSIZE
" -> %"PRIuSIZE
"\n",
9381 rb_gc_count(), old_limit, malloc_limit);
9384 fprintf(stderr,
"[%"PRIuSIZE
"] malloc_limit: not changed (%"PRIuSIZE
")\n",
9385 rb_gc_count(), malloc_limit);
9391#if RGENGC_ESTIMATE_OLDMALLOC
9393 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
9394 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
9395 objspace->rgengc.oldmalloc_increase_limit =
9396 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
9398 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
9399 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
9403 if (0) fprintf(stderr,
"%"PRIdSIZE
"\t%d\t%"PRIuSIZE
"\t%"PRIuSIZE
"\t%"PRIdSIZE
"\n",
9405 objspace->rgengc.need_major_gc,
9406 objspace->rgengc.oldmalloc_increase,
9407 objspace->rgengc.oldmalloc_increase_limit,
9408 gc_params.oldmalloc_limit_max);
9412 objspace->rgengc.oldmalloc_increase = 0;
9414 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
9415 objspace->rgengc.oldmalloc_increase_limit =
9416 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
9417 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
9418 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
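/*
 * malloc_limit adapts to the observed allocation rate: if the bytes malloc'ed
 * since the last GC exceed the current limit, the limit grows by
 * malloc_limit_growth_factor (capped at malloc_limit_max); otherwise it
 * decays by 2% per GC, never dropping below malloc_limit_min.  For example,
 * assuming the default 16 MB minimum and a 1.4 growth factor, one overshoot
 * raises the limit to about 22.4 MB.
 */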
9426garbage_collect(
rb_objspace_t *objspace,
unsigned int reason)
9432#if GC_PROFILE_MORE_DETAIL
9433 objspace->profile.prepare_time = getrusage_time();
9438#if GC_PROFILE_MORE_DETAIL
9439 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
9442 ret = gc_start(objspace, reason);
9452 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
9453#if GC_ENABLE_INCREMENTAL_MARK
9454 unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
9458 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
9461 if (do_full_mark && ruby_enable_autocompact) {
9462 objspace->flags.during_compacting = TRUE;
9465 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
9468 if (!heap_allocated_pages)
return FALSE;
9469 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace))
return TRUE;
9471 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
9472 GC_ASSERT(!is_lazy_sweeping(objspace));
9473 GC_ASSERT(!is_incremental_marking(objspace));
9475 unsigned int lock_lev;
9476 gc_enter(objspace, gc_enter_event_start, &lock_lev);
9478#if RGENGC_CHECK_MODE >= 2
9479 gc_verify_internal_consistency(objspace);
9482 if (ruby_gc_stressful) {
9483 int flag =
FIXNUM_P(ruby_gc_stress_mode) ?
FIX2INT(ruby_gc_stress_mode) : 0;
9485 if ((flag & (1<<gc_stress_no_major)) == 0) {
9486 do_full_mark = TRUE;
9489 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
9492 if (objspace->rgengc.need_major_gc) {
9493 reason |= objspace->rgengc.need_major_gc;
9494 do_full_mark = TRUE;
9496 else if (RGENGC_FORCE_MAJOR_GC) {
9497 reason = GPR_FLAG_MAJOR_BY_FORCE;
9498 do_full_mark = TRUE;
9501 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
9504 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
9505 reason |= GPR_FLAG_MAJOR_BY_FORCE;
9508#if GC_ENABLE_INCREMENTAL_MARK
9509 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
9510 objspace->flags.during_incremental_marking = FALSE;
9513 objspace->flags.during_incremental_marking = do_full_mark;
9517 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
9518 objspace->flags.immediate_sweep = TRUE;
9521 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
9523 gc_report(1, objspace,
"gc_start(reason: %x) => %u, %d, %d\n",
9525 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
9527#if USE_DEBUG_COUNTER
9528 RB_DEBUG_COUNTER_INC(gc_count);
9530 if (reason & GPR_FLAG_MAJOR_MASK) {
9531 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
9532 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
9533 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
9534 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
9535#if RGENGC_ESTIMATE_OLDMALLOC
9536 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
9540 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
9541 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
9542 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
9543 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
9544 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
9548 objspace->profile.count++;
9549 objspace->profile.latest_gc_info = reason;
9550 objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
9551 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
9552 gc_prof_setup_new_record(objspace, reason);
9553 gc_reset_malloc_info(objspace, do_full_mark);
9554 rb_transient_heap_start_marking(do_full_mark);
9557 GC_ASSERT(during_gc);
9559 gc_prof_timer_start(objspace);
9561 gc_marks(objspace, do_full_mark);
9563 gc_prof_timer_stop(objspace);
9565 gc_exit(objspace, gc_enter_event_start, &lock_lev);
9572 int marking = is_incremental_marking(objspace);
9573 int sweeping = is_lazy_sweeping(objspace);
9575 if (marking || sweeping) {
9576 unsigned int lock_lev;
9577 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9579 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9581 if (is_incremental_marking(objspace)) {
9582 gc_marks_rest(objspace);
9584 if (is_lazy_sweeping(objspace)) {
9585 gc_sweep_rest(objspace);
9587 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9593 unsigned int reason;
9600 if (is_marking(objspace)) {
9602 if (is_full_marking(objspace)) buff[i++] =
'F';
9603#if GC_ENABLE_INCREMENTAL_MARK
9604 if (is_incremental_marking(objspace)) buff[i++] =
'I';
9607 else if (is_sweeping(objspace)) {
9609 if (is_lazy_sweeping(objspace)) buff[i++] =
'L';
9620 static char buff[0x10];
9621 gc_current_status_fill(objspace, buff);
9625#if PRINT_ENTER_EXIT_TICK
9627static tick_t last_exit_tick;
9628static tick_t enter_tick;
9629static int enter_count = 0;
9630static char last_gc_status[0x10];
9633gc_record(
rb_objspace_t *objspace,
int direction,
const char *event)
9635 if (direction == 0) {
9637 enter_tick = tick();
9638 gc_current_status_fill(objspace, last_gc_status);
9641 tick_t exit_tick = tick();
9642 char current_gc_status[0x10];
9643 gc_current_status_fill(objspace, current_gc_status);
9646 fprintf(stderr,
"%"PRItick
"\t%"PRItick
"\t%s\t[%s->%s|%c]\n",
9647 enter_tick - last_exit_tick,
9648 exit_tick - enter_tick,
9650 last_gc_status, current_gc_status,
9651 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ?
'+' :
'-');
9652 last_exit_tick = exit_tick;
9655 fprintf(stderr,
"%"PRItick
"\t%"PRItick
"\t%s\t[%s->%s|%c]\n",
9657 exit_tick - enter_tick,
9659 last_gc_status, current_gc_status,
9660 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ?
'+' :
'-');
9666gc_record(
rb_objspace_t *objspace,
int direction,
const char *event)
9673gc_enter_event_cstr(
enum gc_enter_event event)
9676 case gc_enter_event_start:
return "start";
9677 case gc_enter_event_mark_continue:
return "mark_continue";
9678 case gc_enter_event_sweep_continue:
return "sweep_continue";
9679 case gc_enter_event_rest:
return "rest";
9680 case gc_enter_event_finalizer:
return "finalizer";
9681 case gc_enter_event_rb_memerror:
return "rb_memerror";
9687gc_enter_count(
enum gc_enter_event event)
9690 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start);
break;
9691 case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue);
break;
9692 case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue);
break;
9693 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest);
break;
9694 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer);
break;
9695 case gc_enter_event_rb_memerror:
break;
9700#define MEASURE_GC (objspace->flags.measure_gc)
9704gc_enter_event_measure_p(
rb_objspace_t *objspace,
enum gc_enter_event event)
9706 if (!MEASURE_GC)
return false;
9709 case gc_enter_event_start:
9710 case gc_enter_event_mark_continue:
9711 case gc_enter_event_sweep_continue:
9712 case gc_enter_event_rest:
9722static bool current_process_time(
struct timespec *ts);
9725gc_enter_clock(
rb_objspace_t *objspace,
enum gc_enter_event event)
9727 if (gc_enter_event_measure_p(objspace, event)) {
9728 if (!current_process_time(&objspace->profile.start_time)) {
9729 objspace->profile.start_time.tv_sec = 0;
9730 objspace->profile.start_time.tv_nsec = 0;
9736gc_exit_clock(
rb_objspace_t *objspace,
enum gc_enter_event event)
9738 if (gc_enter_event_measure_p(objspace, event)) {
9741 if ((objspace->profile.start_time.tv_sec > 0 ||
9742 objspace->profile.start_time.tv_nsec > 0) &&
9743 current_process_time(&end_time)) {
9745 if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
9750 (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
9751 (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
9752 objspace->profile.total_time_ns += ns;
9759gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9761 RB_VM_LOCK_ENTER_LEV(lock_lev);
9763 gc_enter_clock(objspace, event);
9766 case gc_enter_event_rest:
9767 if (!is_marking(objspace)) break;
9769 case gc_enter_event_start:
9770 case gc_enter_event_mark_continue:
9778 gc_enter_count(event);
9779 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9780 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9783 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9784 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9785 gc_record(objspace, 0, gc_enter_event_cstr(event));
9790gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9792 GC_ASSERT(during_gc != 0);
9795 gc_record(objspace, 1, gc_enter_event_cstr(event));
9796 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9797 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9800 gc_exit_clock(objspace, event);
9801 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9803#if RGENGC_CHECK_MODE >= 2
9804 if (event == gc_enter_event_sweep_continue && gc_mode(objspace) == gc_mode_none) {
9805 GC_ASSERT(!during_gc);
9807 gc_verify_internal_consistency(objspace);
9813gc_with_gvl(void *ptr)
9816 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9820garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9822 if (dont_gc_val()) return TRUE;
9823 if (ruby_thread_has_gvl_p()) {
9824 return garbage_collect(objspace, reason);
9829 oar.objspace = objspace;
9830 oar.reason = reason;
9835 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9845 unsigned int reason = (GPR_FLAG_FULL_MARK |
9846 GPR_FLAG_IMMEDIATE_MARK |
9847 GPR_FLAG_IMMEDIATE_SWEEP |
9851 if (RTEST(compact)) {
9852 GC_ASSERT(GC_COMPACTION_SUPPORTED);
9854 reason |= GPR_FLAG_COMPACT;
9857 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9858 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9859 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9862 garbage_collect(objspace, reason);
9863 gc_finalize_deferred(objspace);
9880 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
9908 if (st_is_member(finalizer_table, obj)) {
9912 GC_ASSERT(RVALUE_MARKED(obj));
9913 GC_ASSERT(!RVALUE_PINNED(obj));
9927#define COULD_MALLOC_REGION_START() \
9928 GC_ASSERT(during_gc); \
9929 VALUE _already_disabled = rb_gc_disable_no_rest(); \
9932#define COULD_MALLOC_REGION_END() \
9934 if (_already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
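/* gc_move() below relocates a single object: it snapshots the source slot's
 * marked/WB-unprotected/uncollectible/marking bits, clears them, copies the slot
 * (plus RVALUE_OVERHEAD, if any) into the destination, re-applies the bits on the
 * destination, and finally turns the source slot into a T_MOVED forwarding cell
 * whose destination field points at the new location. Table updates that may
 * malloc are bracketed by COULD_MALLOC_REGION_START/END so GC stays disabled
 * around them. */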
9946 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
9949 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
9952 marked = rb_objspace_marked_object_p((VALUE)src);
9953 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
9954 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
9955 marking = RVALUE_MARKING((VALUE)src);
9958 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
9959 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
9960 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
9961 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
9965 COULD_MALLOC_REGION_START();
9969 COULD_MALLOC_REGION_END();
9972 st_data_t srcid = (st_data_t)src, id;
9976 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
9977 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
9979 COULD_MALLOC_REGION_START();
9981 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
9982 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
9984 COULD_MALLOC_REGION_END();
9988 memcpy(dest, src, MIN(src_slot_size, slot_size));
9990 if (RVALUE_OVERHEAD > 0) {
9991 void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
9992 void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
9994 memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
9997 memset(src, 0, src_slot_size);
10001 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
10004 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
10008 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10011 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10014 if (wb_unprotected) {
10015 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10018 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10021 if (uncollectible) {
10022 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10025 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10029 src->as.moved.flags = T_MOVED;
10030 src->as.moved.dummy = Qundef;
10031 src->as.moved.destination = (VALUE)dest;
10037#if GC_CAN_COMPILE_COMPACTION
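/* gc_sort_heap_by_empty_slots() below rebuilds each eden heap's page list in
 * ascending free_slots order (via compare_free_slots), so compaction tends to
 * move objects toward the most densely populated pages first. */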
10039compare_free_slots(const void *left, const void *right, void *dummy)
10044 left_page = *(struct heap_page * const *)left;
10045 right_page = *(struct heap_page * const *)right;
10047 return left_page->free_slots - right_page->free_slots;
10053 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
10056 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
10058 struct heap_page *page = 0, **page_list = malloc(size);
10061 SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
10062 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
10063 page_list[i++] = page;
10067 GC_ASSERT((size_t)i == total_pages);
10071 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
10074 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
10076 for (i = 0; i < total_pages; i++) {
10077 ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
10078 if (page_list[i]->free_slots != 0) {
10079 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
10091 if (ARY_SHARED_P(v)) {
10093 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
10096 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
10099 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
10101 if (ARY_EMBED_P(new_root) && new_root != old_root) {
10102 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
10103 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
10104 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
10113 for (long i = 0; i < len; i++) {
10114 UPDATE_IF_MOVED(objspace, ptr[i]);
10119 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
10120 if (rb_ary_embeddable_p(v)) {
10121 rb_ary_make_embedded(v);
10133 if (rb_shape_obj_too_complex(v)) {
10134 rb_gc_update_tbl_refs(ROBJECT_IV_HASH(v));
10139 size_t slot_size = rb_gc_obj_slot_size(v);
10140 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
10141 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
10143 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
10145 if (ROBJ_TRANSIENT_P(v)) {
10146 ROBJ_TRANSIENT_UNSET(v);
10155 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
10156 UPDATE_IF_MOVED(objspace, ptr[i]);
10161hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10165 if (gc_object_moved_p(objspace, (VALUE)*key)) {
10166 *key = rb_gc_location((VALUE)*key);
10169 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10170 *value = rb_gc_location((VALUE)*value);
10173 return ST_CONTINUE;
10177hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
10183 if (gc_object_moved_p(objspace, (VALUE)key)) {
10187 if (gc_object_moved_p(objspace, (VALUE)value)) {
10190 return ST_CONTINUE;
10194hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10198 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10199 *value = rb_gc_location((VALUE)*value);
10202 return ST_CONTINUE;
10206hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
10212 if (gc_object_moved_p(objspace, (VALUE)value)) {
10215 return ST_CONTINUE;
10221 if (!tbl || tbl->num_entries == 0) return;
10223 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
10231 if (!tbl || tbl->num_entries == 0) return;
10233 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
10243 gc_update_table_refs(objspace, ptr);
10249 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
10257 UPDATE_IF_MOVED(objspace, me->owner);
10258 UPDATE_IF_MOVED(objspace, me->defined_class);
10261 switch (def->type) {
10262 case VM_METHOD_TYPE_ISEQ:
10263 if (def->body.iseq.iseqptr) {
10266 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
10268 case VM_METHOD_TYPE_ATTRSET:
10269 case VM_METHOD_TYPE_IVAR:
10270 UPDATE_IF_MOVED(objspace, def->body.attr.location);
10272 case VM_METHOD_TYPE_BMETHOD:
10273 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
10275 case VM_METHOD_TYPE_ALIAS:
10278 case VM_METHOD_TYPE_REFINED:
10280 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
10282 case VM_METHOD_TYPE_CFUNC:
10283 case VM_METHOD_TYPE_ZSUPER:
10284 case VM_METHOD_TYPE_MISSING:
10285 case VM_METHOD_TYPE_OPTIMIZED:
10286 case VM_METHOD_TYPE_UNDEF:
10287 case VM_METHOD_TYPE_NOTIMPLEMENTED:
10298 for (i=0; i<n; i++) {
10299 UPDATE_IF_MOVED(objspace, values[i]);
10306 switch (imemo_type(obj)) {
10310 if (LIKELY(env->ep)) {
10312 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
10313 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
10314 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
10319 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
10320 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
10321 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
10324 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
10325 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
10326 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
10327 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
10329 case imemo_throw_data:
10330 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
10335 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
10336 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
10339 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
10342 rb_iseq_update_references((rb_iseq_t *)obj);
10345 rb_ast_update_references((rb_ast_t *)obj);
10347 case imemo_callcache:
10351 UPDATE_IF_MOVED(objspace, cc->klass);
10352 if (!is_live_object(objspace, cc->klass)) {
10359 if (!is_live_object(objspace, (VALUE)cc->cme_)) {
10365 case imemo_constcache:
10368 UPDATE_IF_MOVED(objspace, ice->value);
10371 case imemo_parser_strterm:
10373 case imemo_callinfo:
10376 rb_bug("not reachable %d", imemo_type(obj));
10381static enum rb_id_table_iterator_result
10382check_id_table_move(VALUE value, void *data)
10386 if (gc_object_moved_p(objspace, (VALUE)value)) {
10387 return ID_TABLE_REPLACE;
10390 return ID_TABLE_CONTINUE;
10402 void *poisoned = asan_unpoison_object_temporary(value);
10405 destination = (VALUE)RMOVED(value)->destination;
10409 destination = value;
10415 asan_poison_object(value);
10419 destination = value;
10422 return destination;
10425static enum rb_id_table_iterator_result
10426update_id_table(VALUE *value, void *data, int existing)
10430 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10431 *value = rb_gc_location((VALUE)*value);
10434 return ID_TABLE_CONTINUE;
10441 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
10445static enum rb_id_table_iterator_result
10446update_cc_tbl_i(VALUE ccs_ptr, void *data)
10450 VM_ASSERT(vm_ccs_p(ccs));
10452 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
10456 for (int i=0; i<ccs->len; i++) {
10457 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
10458 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
10460 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
10461 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
10466 return ID_TABLE_CONTINUE;
10474 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
10478static enum rb_id_table_iterator_result
10479update_cvc_tbl_i(VALUE cvc_entry, void *data)
10487 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
10490 entry->class_value = rb_gc_location(entry->class_value);
10492 return ID_TABLE_CONTINUE;
10500 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
10504static enum rb_id_table_iterator_result
10505mark_cvc_tbl_i(VALUE cvc_entry, void *data)
10512 rb_gc_mark((VALUE) entry->cref);
10514 return ID_TABLE_CONTINUE;
10522 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
10526static enum rb_id_table_iterator_result
10527update_const_table(VALUE value, void *data)
10532 if (gc_object_moved_p(objspace, ce->value)) {
10533 ce->value = rb_gc_location(ce->value);
10536 if (gc_object_moved_p(objspace, ce->file)) {
10537 ce->file = rb_gc_location(ce->file);
10540 return ID_TABLE_CONTINUE;
10547 rb_id_table_foreach_values(tbl, update_const_table, objspace);
10554 UPDATE_IF_MOVED(objspace, entry->klass);
10555 entry = entry->next;
10562 UPDATE_IF_MOVED(objspace, ext->origin_);
10563 UPDATE_IF_MOVED(objspace, ext->includer);
10564 UPDATE_IF_MOVED(objspace, ext->refined_class);
10565 update_subclass_entries(objspace, ext->subclasses);
10571 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
10572 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
10573 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
10581 RVALUE *any = RANY(obj);
10583 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
10589 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10591 if (!RCLASS_EXT(obj)) break;
10592 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10593 update_cc_tbl(objspace, obj);
10594 update_cvc_tbl(objspace, obj);
10595 update_superclasses(objspace, obj);
10597 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
10598 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
10601 update_class_ext(objspace, RCLASS_EXT(obj));
10602 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
10606 if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
10607 !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
10608 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10611 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10613 if (!RCLASS_EXT(obj)) break;
10614 update_class_ext(objspace, RCLASS_EXT(obj));
10615 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
10616 update_cc_tbl(objspace, obj);
10620 gc_ref_update_imemo(objspace, obj);
10632 gc_ref_update_array(objspace, obj);
10636 gc_ref_update_hash(objspace, obj);
10637 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
10645 if (STR_SHARED_P(obj)) {
10652 rb_str_update_shared_ary(obj, old_root, new_root);
10659 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
10660 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
10661 rb_str_make_embedded(obj);
10675 if (compact_func) (*compact_func)(ptr);
10682 gc_ref_update_object(objspace, obj);
10686 if (any->as.file.fptr) {
10687 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10688 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10697 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10702 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10711 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10713 if (any->as.match.str) {
10714 UPDATE_IF_MOVED(objspace, any->as.match.str);
10719 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10720 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10724 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10725 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10731 long i, len = RSTRUCT_LEN(obj);
10732 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10734 for (i = 0; i < len; i++) {
10735 UPDATE_IF_MOVED(objspace, ptr[i]);
10741 rb_gcdebug_print_obj_condition((VALUE)obj);
10742 rb_obj_info_dump(obj);
10749 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10751 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10758 asan_unlock_freelist(page);
10759 asan_lock_freelist(page);
10760 page->flags.has_uncollectible_shady_objects = FALSE;
10761 page->flags.has_remembered_objects = FALSE;
10764 for (; v != (VALUE)vend; v += stride) {
10765 void *poisoned = asan_unpoison_object_temporary(v);
10773 if (RVALUE_WB_UNPROTECTED(v)) {
10774 page->flags.has_uncollectible_shady_objects = TRUE;
10776 if (RVALUE_PAGE_MARKING(page, v)) {
10777 page->flags.has_remembered_objects = TRUE;
10779 if (page->flags.before_sweep) {
10780 if (RVALUE_MARKED(v)) {
10781 gc_update_object_references(objspace, v);
10785 gc_update_object_references(objspace, v);
10790 asan_poison_object(v);
10798#define global_symbols ruby_global_symbols
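/* gc_update_references() below is the second half of compaction: it walks every
 * page of every size pool and rewrites any reference that still points at a
 * T_MOVED slot to its new address (gc_ref_update/gc_update_object_references),
 * then fixes up the VM-global tables: symbol tables, obj_to_id/id_to_obj and the
 * finalizer table. */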
10804 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10808 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10809 bool should_set_mark_bits = TRUE;
10811 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10813 ccan_list_for_each(&heap->pages, page, page_node) {
10814 uintptr_t start = (uintptr_t)page->start;
10815 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10817 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10818 if (page == heap->sweeping_page) {
10819 should_set_mark_bits = FALSE;
10821 if (should_set_mark_bits) {
10822 gc_setup_mark_bits(page);
10826 rb_vm_update_references(vm);
10827 rb_transient_heap_update_references();
10828 rb_gc_update_global_tbl();
10829 global_symbols.ids = rb_gc_location(global_symbols.ids);
10830 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10831 gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
10832 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10833 gc_update_table_refs(objspace, global_symbols.str_sym);
10834 gc_update_table_refs(objspace, finalizer_table);
10837#if GC_CAN_COMPILE_COMPACTION
10851gc_compact_stats(VALUE self)
10855 VALUE h = rb_hash_new();
10856 VALUE considered = rb_hash_new();
10857 VALUE moved = rb_hash_new();
10858 VALUE moved_up = rb_hash_new();
10859 VALUE moved_down = rb_hash_new();
10861 for (i=0; i<T_MASK; i++) {
10862 if (objspace->rcompactor.considered_count_table[i]) {
10863 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
10866 if (objspace->rcompactor.moved_count_table[i]) {
10867 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
10870 if (objspace->rcompactor.moved_up_count_table[i]) {
10871 rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
10874 if (objspace->rcompactor.moved_down_count_table[i]) {
10875 rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
10887# define gc_compact_stats rb_f_notimplement
10890#if GC_CAN_COMPILE_COMPACTION
10892root_obj_check_moved_i(const char *category, VALUE obj, void *data)
10895 rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
10900reachable_object_check_moved_i(VALUE ref, void *data)
10904 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
10909heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
10912 for (; v != (VALUE)vend; v += stride) {
10917 void *poisoned = asan_unpoison_object_temporary(v);
10924 if (!rb_objspace_garbage_object_p(v)) {
10925 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
10931 asan_poison_object(v);
10957gc_compact(VALUE self)
10962 return gc_compact_stats(self);
10965# define gc_compact rb_f_notimplement
10968#if GC_CAN_COMPILE_COMPACTION
10976 size_t growth_slots = gc_params.heap_init_slots;
10978 if (RTEST(double_heap)) {
10979 rb_warn("double_heap is deprecated, please use expand_heap instead");
10982 RB_VM_LOCK_ENTER();
10987 if (RTEST(double_heap) || RTEST(expand_heap)) {
10988 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10990 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10992 if (RTEST(expand_heap)) {
10993 size_t required_pages = growth_slots / size_pool->slot_size;
10994 heap_add_pages(objspace, size_pool, heap, MAX(required_pages, heap->total_pages));
10997 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
11002 if (RTEST(toward_empty)) {
11003 gc_sort_heap_by_empty_slots(objspace);
11006 RB_VM_LOCK_LEAVE();
11010 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
11011 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
11013 return gc_compact_stats(self);
11016# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
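/* GC.verify_compaction_references (gc_verify_compaction_references above) is a
 * debugging aid: it optionally expands (or doubles) every heap, optionally sorts
 * pages so compaction has maximum room to move objects, runs a compacting GC,
 * and then asserts via the *_check_moved_i callbacks that nothing reachable
 * still points at a T_MOVED slot. A rough Ruby-level invocation might look like
 * GC.verify_compaction_references(expand_heap: true, toward: :empty); the exact
 * keyword names depend on the Ruby version. */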
11030 unsigned int reason = GPR_DEFAULT_REASON;
11031 garbage_collect(objspace, reason);
11041#if RGENGC_PROFILE >= 2
11043static const char *type_name(int type, VALUE obj);
11046gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
11050 for (i=0; i<T_MASK; i++) {
11051 const char *type = type_name(i, 0);
11071gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
11073 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
11074 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
11075#if RGENGC_ESTIMATE_OLDMALLOC
11076 static VALUE sym_oldmalloc;
11078 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
11079 static VALUE sym_none, sym_marking, sym_sweeping;
11081 VALUE major_by, need_major_by;
11082 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
11088 hash = hash_or_key;
11094 if (NIL_P(sym_major_by)) {
11095#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
11098 S(immediate_sweep);
11108#if RGENGC_ESTIMATE_OLDMALLOC
11122#define SET(name, attr) \
11123 if (key == sym_##name) \
11125 else if (hash != Qnil) \
11126 rb_hash_aset(hash, sym_##name, (attr));
11129 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11130 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11131 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11132 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11133#if RGENGC_ESTIMATE_OLDMALLOC
11134 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11137 SET(major_by, major_by);
11139 if (orig_flags == 0) {
11140 unsigned int need_major_flags = objspace->rgengc.need_major_gc;
11142 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11143 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11144 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11145 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11146#if RGENGC_ESTIMATE_OLDMALLOC
11147 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11150 SET(need_major_by, need_major_by);
11154 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
11155 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
11156 (flags & GPR_FLAG_METHOD) ? sym_method :
11157 (flags & GPR_FLAG_CAPI) ? sym_capi :
11158 (flags & GPR_FLAG_STRESS) ? sym_stress :
11162 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
11163 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
11165 if (orig_flags == 0) {
11166 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
11167 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
11182 return gc_info_decode(objspace, key, 0);
11191 arg = rb_hash_new();
11197 return gc_info_decode(objspace, arg, 0);
11203 gc_stat_sym_heap_allocated_pages,
11204 gc_stat_sym_heap_sorted_length,
11205 gc_stat_sym_heap_allocatable_pages,
11206 gc_stat_sym_heap_available_slots,
11207 gc_stat_sym_heap_live_slots,
11208 gc_stat_sym_heap_free_slots,
11209 gc_stat_sym_heap_final_slots,
11210 gc_stat_sym_heap_marked_slots,
11211 gc_stat_sym_heap_eden_pages,
11212 gc_stat_sym_heap_tomb_pages,
11213 gc_stat_sym_total_allocated_pages,
11214 gc_stat_sym_total_freed_pages,
11215 gc_stat_sym_total_allocated_objects,
11216 gc_stat_sym_total_freed_objects,
11217 gc_stat_sym_malloc_increase_bytes,
11218 gc_stat_sym_malloc_increase_bytes_limit,
11219 gc_stat_sym_minor_gc_count,
11220 gc_stat_sym_major_gc_count,
11221 gc_stat_sym_compact_count,
11222 gc_stat_sym_read_barrier_faults,
11223 gc_stat_sym_total_moved_objects,
11224 gc_stat_sym_remembered_wb_unprotected_objects,
11225 gc_stat_sym_remembered_wb_unprotected_objects_limit,
11226 gc_stat_sym_old_objects,
11227 gc_stat_sym_old_objects_limit,
11228#if RGENGC_ESTIMATE_OLDMALLOC
11229 gc_stat_sym_oldmalloc_increase_bytes,
11230 gc_stat_sym_oldmalloc_increase_bytes_limit,
11233 gc_stat_sym_total_generated_normal_object_count,
11234 gc_stat_sym_total_generated_shady_object_count,
11235 gc_stat_sym_total_shade_operation_count,
11236 gc_stat_sym_total_promoted_count,
11237 gc_stat_sym_total_remembered_normal_object_count,
11238 gc_stat_sym_total_remembered_shady_object_count,
11243static VALUE gc_stat_symbols[gc_stat_sym_last];
11246setup_gc_stat_symbols(void)
11248 if (gc_stat_symbols[0] == 0) {
11249#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
11252 S(heap_allocated_pages);
11253 S(heap_sorted_length);
11254 S(heap_allocatable_pages);
11255 S(heap_available_slots);
11256 S(heap_live_slots);
11257 S(heap_free_slots);
11258 S(heap_final_slots);
11259 S(heap_marked_slots);
11260 S(heap_eden_pages);
11261 S(heap_tomb_pages);
11262 S(total_allocated_pages);
11263 S(total_freed_pages);
11264 S(total_allocated_objects);
11265 S(total_freed_objects);
11266 S(malloc_increase_bytes);
11267 S(malloc_increase_bytes_limit);
11271 S(read_barrier_faults);
11272 S(total_moved_objects);
11273 S(remembered_wb_unprotected_objects);
11274 S(remembered_wb_unprotected_objects_limit);
11276 S(old_objects_limit);
11277#if RGENGC_ESTIMATE_OLDMALLOC
11278 S(oldmalloc_increase_bytes);
11279 S(oldmalloc_increase_bytes_limit);
11282 S(total_generated_normal_object_count);
11283 S(total_generated_shady_object_count);
11284 S(total_shade_operation_count);
11285 S(total_promoted_count);
11286 S(total_remembered_normal_object_count);
11287 S(total_remembered_shady_object_count);
11294gc_stat_internal(VALUE hash_or_sym)
11299 setup_gc_stat_symbols();
11302 hash = hash_or_sym;
11311#define SET(name, attr) \
11312 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
11314 else if (hash != Qnil) \
11315 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
11317 SET(count, objspace->profile.count);
11318 SET(time, (size_t) (objspace->profile.total_time_ns / (1000 * 1000) ));
11321 SET(heap_allocated_pages, heap_allocated_pages);
11322 SET(heap_sorted_length, heap_pages_sorted_length);
11323 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
11324 SET(heap_available_slots, objspace_available_slots(objspace));
11325 SET(heap_live_slots, objspace_live_slots(objspace));
11326 SET(heap_free_slots, objspace_free_slots(objspace));
11327 SET(heap_final_slots, heap_pages_final_slots);
11328 SET(heap_marked_slots, objspace->marked_slots);
11329 SET(heap_eden_pages, heap_eden_total_pages(objspace));
11330 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
11331 SET(total_allocated_pages, total_allocated_pages(objspace));
11332 SET(total_freed_pages, total_freed_pages(objspace));
11333 SET(total_allocated_objects, objspace->total_allocated_objects);
11334 SET(total_freed_objects, objspace->profile.total_freed_objects);
11335 SET(malloc_increase_bytes, malloc_increase);
11336 SET(malloc_increase_bytes_limit, malloc_limit);
11337 SET(minor_gc_count, objspace->profile.minor_gc_count);
11338 SET(major_gc_count, objspace->profile.major_gc_count);
11339 SET(compact_count, objspace->profile.compact_count);
11340 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
11341 SET(total_moved_objects, objspace->rcompactor.total_moved);
11342 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
11343 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
11344 SET(old_objects, objspace->rgengc.old_objects);
11345 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
11346#if RGENGC_ESTIMATE_OLDMALLOC
11347 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
11348 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
11352 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
11353 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
11354 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
11355 SET(total_promoted_count, objspace->profile.total_promoted_count);
11356 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
11357 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
11365#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
11366 if (hash != Qnil) {
11367 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
11368 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
11369 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
11370 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
11371 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
11372 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
11383 arg = rb_hash_new();
11386 size_t value = gc_stat_internal(arg);
11396 gc_stat_internal(arg);
11404 size_t value = gc_stat_internal(key);
11408 gc_stat_internal(key);
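/* gc_stat_internal() serves both arities of GC.stat: given a Symbol it returns
 * that single counter, given a Hash it fills in every key via the SET() macro.
 * Ruby-level examples: GC.stat[:heap_live_slots] or GC.stat(:major_gc_count).
 * GC.stat_heap works the same way per size pool via gc_stat_heap_internal(). */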
11414enum gc_stat_heap_sym {
11415 gc_stat_heap_sym_slot_size,
11416 gc_stat_heap_sym_heap_allocatable_pages,
11417 gc_stat_heap_sym_heap_eden_pages,
11418 gc_stat_heap_sym_heap_eden_slots,
11419 gc_stat_heap_sym_heap_tomb_pages,
11420 gc_stat_heap_sym_heap_tomb_slots,
11421 gc_stat_heap_sym_total_allocated_pages,
11422 gc_stat_heap_sym_total_freed_pages,
11423 gc_stat_heap_sym_force_major_gc_count,
11424 gc_stat_heap_sym_last
11427static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
11430setup_gc_stat_heap_symbols(void)
11432 if (gc_stat_heap_symbols[0] == 0) {
11433#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
11435 S(heap_allocatable_pages);
11436 S(heap_eden_pages);
11437 S(heap_eden_slots);
11438 S(heap_tomb_pages);
11439 S(heap_tomb_slots);
11440 S(total_allocated_pages);
11441 S(total_freed_pages);
11442 S(force_major_gc_count);
11448gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
11453 setup_gc_stat_heap_symbols();
11456 hash = hash_or_sym;
11465 if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
11471#define SET(name, attr) \
11472 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
11474 else if (hash != Qnil) \
11475 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
11477 SET(slot_size, size_pool->slot_size);
11478 SET(heap_allocatable_pages, size_pool->allocatable_pages);
11479 SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
11480 SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
11481 SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
11482 SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
11483 SET(total_allocated_pages, size_pool->total_allocated_pages);
11484 SET(total_freed_pages, size_pool->total_freed_pages);
11485 SET(force_major_gc_count, size_pool->force_major_gc_count);
11498 if (NIL_P(heap_name)) {
11500 arg = rb_hash_new();
11509 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11512 hash = rb_hash_new();
11513 rb_hash_aset(arg, INT2FIX(i), hash);
11515 gc_stat_heap_internal(i, hash);
11519 int size_pool_idx = FIX2INT(heap_name);
11522 arg = rb_hash_new();
11525 size_t value = gc_stat_heap_internal(size_pool_idx, arg);
11535 gc_stat_heap_internal(size_pool_idx, arg);
11548 return ruby_gc_stress_mode;
11554 objspace->flags.gc_stressful = RTEST(flag);
11555 objspace->gc_stress_mode = flag;
11562 gc_stress_set(objspace, flag);
11570 return rb_objspace_gc_enable(objspace);
11576 int old = dont_gc_val();
11585 return rb_gc_enable();
11589rb_gc_disable_no_rest(void)
11592 return gc_disable_no_rest(objspace);
11598 int old = dont_gc_val();
11607 return rb_objspace_gc_disable(objspace);
11614 return gc_disable_no_rest(objspace);
11620 return rb_gc_disable();
11623#if GC_CAN_COMPILE_COMPACTION
11637 GC_ASSERT(GC_COMPACTION_SUPPORTED);
11639 ruby_enable_autocompact = RTEST(v);
11643# define gc_set_auto_compact rb_f_notimplement
11646#if GC_CAN_COMPILE_COMPACTION
11654gc_get_auto_compact(VALUE _)
11656 return RBOOL(ruby_enable_autocompact);
11659# define gc_get_auto_compact rb_f_notimplement
11663get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
11665 const char *ptr = getenv(name);
11668 if (ptr != NULL && *ptr) {
11671#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
11672 val = strtoll(ptr, &end, 0);
11674 val = strtol(ptr, &end, 0);
11677 case 'k': case 'K':
11681 case 'm': case 'M':
11685 case 'g': case 'G':
11686 unit = 1024*1024*1024;
11690 while (*end && isspace((unsigned char)*end)) end++;
11692 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11696 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
11697 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
11702 if (val > 0 && (size_t)val > lower_bound) {
11704 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
11706 *default_value = (size_t)val;
11711 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
11712 name, val, *default_value, lower_bound);
11721get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
11723 const char *ptr = getenv(name);
11726 if (ptr != NULL && *ptr) {
11728 val = strtod(ptr, &end);
11729 if (!*ptr || *end) {
11730 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11734 if (accept_zero && val == 0.0) {
11737 else if (val <= lower_bound) {
11739 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
11740 name, val, *default_value, lower_bound);
11743 else if (upper_bound != 0.0 &&
11744 val > upper_bound) {
11746 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
11747 name, val, *default_value, upper_bound);
11757 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
11758 *default_value = val;
11767 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11770 if (gc_params.heap_init_slots > size_pool->eden_heap.total_slots) {
11771 size_t slots = gc_params.heap_init_slots - size_pool->eden_heap.total_slots;
11772 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
11773 size_pool->allocatable_pages = slots * multiple / HEAP_PAGE_OBJ_LIMIT;
11778 size_pool->allocatable_pages = 0;
11781 heap_pages_expand_sorted(objspace);
11827ruby_gc_set_params(void)
11831 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
11836 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
11837 gc_set_initial_pages(objspace);
11840 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
11841 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
11842 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
11844 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
11845 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
11846 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
11847 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
11848 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
11850 if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
11851 malloc_limit = gc_params.malloc_limit_min;
11853 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
11854 if (!gc_params.malloc_limit_max) {
11855 gc_params.malloc_limit_max = SIZE_MAX;
11857 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
11859#if RGENGC_ESTIMATE_OLDMALLOC
11860 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
11861 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
11863 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
11864 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
11869reachable_objects_from_callback(VALUE obj)
11872 cr->mfd->mark_func(obj, cr->mfd->data);
11876rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
11880 RB_VM_LOCK_ENTER();
11882 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
11884 if (is_markable_object(objspace, obj)) {
11886 struct gc_mark_func_data_struct mfd = {
11889 }, *prev_mfd = cr->mfd;
11892 gc_mark_children(objspace, obj);
11893 cr->mfd = prev_mfd;
11896 RB_VM_LOCK_LEAVE();
11900 const char *category;
11901 void (*func)(const char *category, VALUE, void *);
11906root_objects_from(VALUE obj, void *ptr)
11909 (*data->func)(data->category, obj, data->data);
11913rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
11916 objspace_reachable_objects_from_root(objspace, func, passing_data);
11920objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
11922 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
11927 .data = passing_data,
11929 struct gc_mark_func_data_struct mfd = {
11930 .mark_func = root_objects_from,
11932 }, *prev_mfd = cr->mfd;
11935 gc_mark_roots(objspace, &data.category);
11936 cr->mfd = prev_mfd;
11950gc_vraise(void *ptr)
11953 rb_vraise(argv->exc, argv->fmt, *argv->ap);
11958gc_raise(VALUE exc, const char *fmt, ...)
11966 if (ruby_thread_has_gvl_p()) {
11976 fprintf(stderr, "%s", "[FATAL] ");
11977 vfprintf(stderr, fmt, ap);
11984static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
11987negative_size_allocation_error(const char *msg)
11993ruby_memerror_body(void *dummy)
11999NORETURN(static void ruby_memerror(void));
12004 if (ruby_thread_has_gvl_p()) {
12013 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12016 exit(EXIT_FAILURE);
12023 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
12034 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
12039 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12040 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12041 exit(EXIT_FAILURE);
12043 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12044 rb_ec_raised_clear(ec);
12047 rb_ec_raised_set(ec, RAISED_NOMEMORY);
12048 exc = ruby_vm_special_exception_copy(exc);
12051 EC_JUMP_TAG(ec, TAG_RAISE);
12055rb_aligned_malloc(size_t alignment, size_t size)
12058 GC_ASSERT(((alignment - 1) & alignment) == 0);
12059 GC_ASSERT(alignment % sizeof(void*) == 0);
12063#if defined __MINGW32__
12064 res = __mingw_aligned_malloc(size, alignment);
12065#elif defined _WIN32
12066 void *_aligned_malloc(size_t, size_t);
12067 res = _aligned_malloc(size, alignment);
12068#elif defined(HAVE_POSIX_MEMALIGN)
12069 if (posix_memalign(&res, alignment, size) != 0) {
12072#elif defined(HAVE_MEMALIGN)
12073 res = memalign(alignment, size);
12076 res = malloc(alignment + size + sizeof(void*));
12077 aligned = (char*)res + alignment + sizeof(void*);
12078 aligned -= ((VALUE)aligned & (alignment - 1));
12079 ((void**)aligned)[-1] = res;
12080 res = (void*)aligned;
12083 GC_ASSERT((uintptr_t)res % alignment == 0);
12089rb_aligned_free(void *ptr, size_t size)
12091#if defined __MINGW32__
12092 __mingw_aligned_free(ptr);
12093#elif defined _WIN32
12094 _aligned_free(ptr);
12095#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
12098 free(((void**)ptr)[-1]);
12102static inline size_t
12103objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
12105#ifdef HAVE_MALLOC_USABLE_SIZE
12106 return malloc_usable_size(ptr);
12113 MEMOP_TYPE_MALLOC = 0,
12119atomic_sub_nounderflow(size_t *var, size_t sub)
12121 if (sub == 0) return;
12125 if (val < sub) sub = val;
12126 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
12134 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
12135 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
12137 if (gc_stress_full_mark_after_malloc_p()) {
12138 reason |= GPR_FLAG_FULL_MARK;
12140 garbage_collect_with_gvl(objspace, reason);
12145objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12147 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
12149 type == MEMOP_TYPE_MALLOC ? "malloc" :
12150 type == MEMOP_TYPE_FREE ? "free " :
12151 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
12152 new_size, old_size);
12157objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12159 if (new_size > old_size) {
12160 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
12161#if RGENGC_ESTIMATE_OLDMALLOC
12162 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
12166 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
12167#if RGENGC_ESTIMATE_OLDMALLOC
12168 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
12172 if (type == MEMOP_TYPE_MALLOC) {
12175 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
12179 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
12183#if MALLOC_ALLOCATED_SIZE
12184 if (new_size >= old_size) {
12185 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
12188 size_t dec_size = old_size - new_size;
12189 size_t allocated_size = objspace->malloc_params.allocated_size;
12191#if MALLOC_ALLOCATED_SIZE_CHECK
12192 if (allocated_size < dec_size) {
12193 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
12196 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
12200 case MEMOP_TYPE_MALLOC:
12201 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
12203 case MEMOP_TYPE_FREE:
12205 size_t allocations = objspace->malloc_params.allocations;
12206 if (allocations > 0) {
12207 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
12209#if MALLOC_ALLOCATED_SIZE_CHECK
12211 GC_ASSERT(objspace->malloc_params.allocations > 0);
12216 case MEMOP_TYPE_REALLOC: break;
12222#define objspace_malloc_increase(...) \
12223 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
12224 !malloc_increase_done; \
12225 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
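/* objspace_malloc_increase() is a for-loop macro: the report call runs first as
 * the loop initializer, the caller's statement or block runs as the loop body,
 * and objspace_malloc_increase_body() runs last to adjust malloc_increase /
 * oldmalloc_increase and, when the malloc limit has been exceeded, kick off a
 * GC. */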
12229#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12236#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12237const char *ruby_malloc_info_file;
12238int ruby_malloc_info_line;
12241static inline size_t
12242objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
12244 if (size == 0) size = 1;
12246#if CALC_EXACT_MALLOC_SIZE
12260 return during_gc && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
12263static inline void *
12264objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
12266 size = objspace_malloc_size(objspace, mem, size);
12267 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
12269#if CALC_EXACT_MALLOC_SIZE
12273#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12274 info->gen = objspace->profile.count;
12275 info->file = ruby_malloc_info_file;
12276 info->line = info->file ? ruby_malloc_info_line : 0;
12285#if defined(__GNUC__) && RUBY_DEBUG
12286#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
12289#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
12290# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
12293#define GC_MEMERROR(...) \
12294 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
12296#define TRY_WITH_GC(siz, expr) do { \
12297 const gc_profile_record_flag gpr = \
12298 GPR_FLAG_FULL_MARK | \
12299 GPR_FLAG_IMMEDIATE_MARK | \
12300 GPR_FLAG_IMMEDIATE_SWEEP | \
12302 objspace_malloc_gc_stress(objspace); \
12304 if (LIKELY((expr))) { \
12307 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
12309 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
12311 else if ((expr)) { \
12315 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
12316 "%"PRIdSIZE" bytes for %s", \
12327 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12328 rb_warn("malloc during GC detected, this could cause crashes if it triggers another GC");
12329#if RGENGC_CHECK_MODE || RUBY_DEBUG
12330 rb_bug("Cannot malloc during GC");
12336 size = objspace_malloc_prepare(objspace, size);
12337 TRY_WITH_GC(size, mem = malloc(size));
12338 RB_DEBUG_COUNTER_INC(heap_xmalloc);
12339 return objspace_malloc_fixup(objspace, mem, size);
12342static inline size_t
12343xmalloc2_size(const size_t count, const size_t elsize)
12345 return size_mul_or_raise(count, elsize, rb_eArgError);
12349objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
12351 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12352 rb_warn("realloc during GC detected, this could cause crashes if it triggers another GC");
12353#if RGENGC_CHECK_MODE || RUBY_DEBUG
12354 rb_bug("Cannot realloc during GC");
12360 if (!ptr) return objspace_xmalloc0(objspace, new_size);
12367 if (new_size == 0) {
12368 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
12391 objspace_xfree(objspace, ptr, old_size);
12405#if CALC_EXACT_MALLOC_SIZE
12410 old_size = info->size;
12414 old_size = objspace_malloc_size(objspace, ptr, old_size);
12416 new_size = objspace_malloc_size(objspace, mem, new_size);
12418#if CALC_EXACT_MALLOC_SIZE
12421 info->size = new_size;
12426 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
12428 RB_DEBUG_COUNTER_INC(heap_xrealloc);
12432#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
12434#define MALLOC_INFO_GEN_SIZE 100
12435#define MALLOC_INFO_SIZE_SIZE 10
12436static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
12437static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
12438static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
12439static st_table *malloc_info_file_table;
12442mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
12444 const char *file = (void *)key;
12445 const size_t *data = (void *)val;
12447 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
12449 return ST_CONTINUE;
12454rb_malloc_info_show_results(void)
12458 fprintf(stderr, "* malloc_info gen statistics\n");
12459 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
12460 if (i == MALLOC_INFO_GEN_SIZE-1) {
12461 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12464 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12468 fprintf(stderr, "* malloc_info size statistics\n");
12469 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12471 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
12473 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
12475 if (malloc_info_file_table) {
12476 fprintf(stderr, "* malloc_info file statistics\n");
12477 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
12482rb_malloc_info_show_results(void)
12488objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
12497#if CALC_EXACT_MALLOC_SIZE
12500 old_size = info->size;
12502#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12504 int gen = (int)(objspace->profile.count - info->gen);
12505 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
12508 malloc_info_gen_cnt[gen_index]++;
12509 malloc_info_gen_size[gen_index] += info->size;
12511 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12512 size_t s = 16 << i;
12513 if (info->size <= s) {
12514 malloc_info_size[i]++;
12518 malloc_info_size[i]++;
12522 st_data_t key = (st_data_t)info->file, d;
12525 if (malloc_info_file_table == NULL) {
12526 malloc_info_file_table = st_init_numtable_with_size(1024);
12528 if (st_lookup(malloc_info_file_table, key, &d)) {
12530 data = (size_t *)d;
12533 data = malloc(xmalloc2_size(2, sizeof(size_t)));
12534 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
12535 data[0] = data[1] = 0;
12536 st_insert(malloc_info_file_table, key, (st_data_t)data);
12539 data[1] += info->size;
12541 if (0 && gen >= 2) {
12543 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
12544 info->size, gen, info->file, info->line);
12547 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
12554 old_size = objspace_malloc_size(objspace, ptr, old_size);
12556 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
12559 RB_DEBUG_COUNTER_INC(heap_xfree);
12564ruby_xmalloc0(size_t size)
12570ruby_xmalloc_body(size_t size)
12572 if ((ssize_t)size < 0) {
12573 negative_size_allocation_error("too large allocation size");
12575 return ruby_xmalloc0(size);
12579ruby_malloc_size_overflow(size_t count, size_t elsize)
12582 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
12587ruby_xmalloc2_body(size_t n, size_t size)
12589 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
12595 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12596 rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
12597#if RGENGC_CHECK_MODE || RUBY_DEBUG
12598 rb_bug("Cannot calloc during GC");
12604 size = objspace_malloc_prepare(objspace, size);
12605 TRY_WITH_GC(size, mem = calloc1(size));
12606 return objspace_malloc_fixup(objspace, mem, size);
12610ruby_xcalloc_body(size_t n, size_t size)
12612 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
12615#ifdef ruby_sized_xrealloc
12616#undef ruby_sized_xrealloc
12619ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
12621 if ((ssize_t)new_size < 0) {
12622 negative_size_allocation_error("too large allocation size");
12625 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
12629ruby_xrealloc_body(void *ptr, size_t new_size)
12631 return ruby_sized_xrealloc(ptr, new_size, 0);
12634#ifdef ruby_sized_xrealloc2
12635#undef ruby_sized_xrealloc2
12638ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
12640 size_t len = xmalloc2_size(n, size);
12641 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
12645ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
12647 return ruby_sized_xrealloc2(ptr, n, size, 0);
12650#ifdef ruby_sized_xfree
12651#undef ruby_sized_xfree
12654ruby_sized_xfree(void *x, size_t size)
12660 if (LIKELY(GET_VM())) {
12672 ruby_sized_xfree(x, 0);
12676rb_xmalloc_mul_add(size_t x, size_t y, size_t z)
12678 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12679 return ruby_xmalloc(w);
12683rb_xcalloc_mul_add(size_t x, size_t y, size_t z)
12685 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12686 return ruby_xcalloc(w, 1);
12690rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
12692 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12693 return ruby_xrealloc((void *)p, w);
12697rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
12699 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12700 return ruby_xmalloc(u);
12704rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
12706 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12707 return ruby_xcalloc(u, 1);
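/* The rb_x*_mul_add(_mul) wrappers above compute x*y+z (or x*y+z*w) with
 * overflow checking via size_mul_add_or_raise()/size_mul_add_mul_or_raise()
 * before allocating, raising ArgumentError instead of silently wrapping; e.g.
 * rb_xmalloc_mul_add(n, elem_size, header_size) is the checked counterpart of
 * malloc(n * elem_size + header_size). */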
12714ruby_mimmalloc(size_t size)
12717#if CALC_EXACT_MALLOC_SIZE
12720 mem = malloc(size);
12721#if CALC_EXACT_MALLOC_SIZE
12730#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12742ruby_mimfree(void *ptr)
12744#if CALC_EXACT_MALLOC_SIZE
12752rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
12760 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
12762 ptr = ruby_xmalloc0(size);
12770rb_alloc_tmp_buffer(volatile VALUE *store, long len)
12774 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
12778 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
12782rb_free_tmp_buffer(volatile VALUE *store)
12786 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
12792#if MALLOC_ALLOCATED_SIZE
12803gc_malloc_allocated_size(VALUE self)
12818gc_malloc_allocations(VALUE self)
12825rb_gc_adjust_memory_usage(ssize_t diff)
12829 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
12831 else if (diff < 0) {
12832 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
12846#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
12848#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12850wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
12854 if (!is_live_object(objspace, obj)) return ST_DELETE;
12855 return ST_CONTINUE;
12860wmap_replace_ref(st_data_t *key, st_data_t *value, st_data_t _argp, int existing)
12862 *key = rb_gc_location((VALUE)*key);
12865 VALUE size = values[0];
12867 for (VALUE index = 1; index <= size; index++) {
12868 values[index] = rb_gc_location(values[index]);
12871 return ST_CONTINUE;
12875wmap_foreach_replace(st_data_t key, st_data_t value, st_data_t _argp, int error)
12877 if (rb_gc_location((VALUE)key) != (VALUE)key) {
12882 VALUE size = values[0];
12884 for (VALUE index = 1; index <= size; index++) {
12885 VALUE val = values[index];
12886 if (rb_gc_location(val) != val) {
12891 return ST_CONTINUE;
12895wmap_compact(void *ptr)
12898 if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
12899 if (w->obj2wmap) st_foreach_with_replace(w->obj2wmap, wmap_foreach_replace, wmap_replace_ref, (st_data_t)NULL);
12900 w->final = rb_gc_location(w->final);
12904wmap_mark(void *ptr)
12907#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12908 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
12910 rb_gc_mark_movable(w->final);
12914wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
12917 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
12918 return ST_CONTINUE;
12922wmap_free(void *ptr)
12925 st_foreach(w->obj2wmap, wmap_free_map, 0);
12926 st_free_table(w->obj2wmap);
12927 st_free_table(w->wmap2obj);
12932wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
12935 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
12936 return ST_CONTINUE;
12940wmap_memsize(const void *ptr)
12943 const struct weakmap *w = ptr;
12945 size += st_memsize(w->obj2wmap);
12946 size += st_memsize(w->wmap2obj);
12947 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
12959 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
12965wmap_allocate(VALUE klass)
12969 w->obj2wmap = rb_init_identtable();
12970 w->wmap2obj = rb_init_identtable();
12971 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
12981 if (!is_pointer_to_heap(objspace, (void *)obj)) return FALSE;
12983 void *poisoned = asan_unpoison_object_temporary(obj);
12987 is_live_object(objspace, obj));
12990 asan_poison_object(obj);
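/* A weakmap keeps two identity tables: wmap2obj maps each key to its value, and
 * obj2wmap maps a referenced object back to a length-prefixed VALUE array of the
 * keys that reference it, so wmap_finalize() can drop every entry for an object
 * when its finalizer (w->final) runs. wmap_live_p()/wmap_live_entry_p() filter
 * out entries whose objects have already been collected. */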
12997wmap_remove_inverse_ref(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
12999 if (!existing) return ST_STOP;
13004 VALUE size = values[0];
13009 ruby_sized_xfree(values, 2 * sizeof(VALUE));
13013 bool found = false;
13015 for (; index <= size; index++) {
13016 if (values[index] == old_ref) {
13021 if (!found) return ST_STOP;
13023 if (size > index) {
13024 MEMMOVE(&values[index], &values[index + 1], VALUE, size - index);
13029 SIZED_REALLOC_N(values, VALUE, size + 1, size + 2);
13030 *val = (st_data_t)values;
13031 return ST_CONTINUE;
13038 st_data_t orig, wmap, data;
13039 VALUE obj, *rids, i, size;
13044 if (UNDEF_P(obj = id2ref_obj_tbl(&
rb_objspace, objid))) {
13045 rb_bug(
"wmap_finalize: objid is not found.");
13049 orig = (st_data_t)obj;
13050 if (st_delete(w->obj2wmap, &orig, &data)) {
13051 rids = (
VALUE *)data;
13053 for (i = 0; i < size; ++i) {
13054 wmap = (st_data_t)rids[i];
13055 st_delete(w->wmap2obj, &wmap, NULL);
13057 ruby_sized_xfree((
VALUE *)data, (size + 1) *
sizeof(
VALUE));
13060 wmap = (st_data_t)obj;
13061 if (st_delete(w->wmap2obj, &wmap, &orig)) {
13062 wmap = (st_data_t)obj;
13063 st_update(w->obj2wmap, orig, wmap_remove_inverse_ref, wmap);
    else if (wmap_live_p(objspace, obj)) {
    return rb_str_catf(str, "#<collected:%p>", (void*)obj);

wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
    VALUE str = argp->value;
    wmap_inspect_append(objspace, str, k);
    wmap_inspect_append(objspace, str, v);
    return ST_CONTINUE;

wmap_inspect(VALUE self)
    str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
    st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
wmap_live_entry_p(rb_objspace_t *objspace, st_data_t key, st_data_t val)
    return wmap_live_p(objspace, (VALUE)key) && wmap_live_p(objspace, (VALUE)val);

wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
    if (wmap_live_entry_p(objspace, key, val)) {
    return ST_CONTINUE;

wmap_each(VALUE self)
    st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);

wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
    if (wmap_live_entry_p(objspace, key, val)) {
    return ST_CONTINUE;

wmap_each_key(VALUE self)
    st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);

wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
    if (wmap_live_entry_p(objspace, key, val)) {
    return ST_CONTINUE;

wmap_each_value(VALUE self)
    st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);

wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
    VALUE ary = argp->value;
    if (wmap_live_entry_p(objspace, key, val)) {
        rb_ary_push(ary, (VALUE)key);
    return ST_CONTINUE;

wmap_keys(VALUE self)
    args.value = rb_ary_new();
    st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);

wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
    VALUE ary = argp->value;
    if (wmap_live_entry_p(objspace, key, val)) {
        rb_ary_push(ary, (VALUE)val);
    return ST_CONTINUE;

wmap_values(VALUE self)
    args.value = rb_ary_new();
    st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
    VALUE size, *ptr, *optr;
    size = (ptr = optr = (VALUE *)*val)[0];
    for (VALUE index = 1; index <= size; index++) {
        if (ptr[index] == (VALUE)arg) {
    SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
    ptr = ruby_xmalloc0(2 * sizeof(VALUE));
    ptr[size] = (VALUE)arg;
    if (ptr == optr) return ST_STOP;
    *val = (st_data_t)ptr;
    return ST_CONTINUE;

wmap_aset_replace_value(st_data_t *key, st_data_t *val, st_data_t _args, int existing)
    args->old_value = *val;
    *val = (st_data_t)args->new_value;
    return ST_CONTINUE;

    define_final0(value, w->final);
    define_final0(key, w->final);
    .new_value = value,
    st_update(w->wmap2obj, (st_data_t)key, wmap_aset_replace_value, (st_data_t)&aset_args);
    if (value != aset_args.old_value) {
        if (!UNDEF_P(aset_args.old_value) && FL_ABLE(aset_args.old_value)) {
            st_update(w->obj2wmap, (st_data_t)aset_args.old_value, wmap_remove_inverse_ref, key);
    st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
    return nonspecial_obj_id(value);

    GC_ASSERT(wmap_live_p(objspace, key));
    if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
    if (!wmap_live_p(objspace, obj)) return Qundef;

    VALUE obj = wmap_lookup(self, key);
    return !UNDEF_P(obj) ? obj : Qnil;

    return RBOOL(!UNDEF_P(wmap_lookup(self, key)));

wmap_size(VALUE self)
    n = w->wmap2obj->num_entries;
#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
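
/* The weakmap callbacks above (wmap_mark, wmap_free, wmap_memsize,
 * wmap_compact) are wired together through an rb_data_type_t and an allocator.
 * A minimal, self-contained sketch of that TypedData pattern for a hypothetical
 * wrapped struct follows; the names are illustrative and not the gc.c ones. */
#include "ruby.h"

struct counter { long hits; };

static void
counter_free(void *ptr)
{
    xfree(ptr);
}

static size_t
counter_memsize(const void *ptr)
{
    return sizeof(struct counter);
}

/* dmark/dcompact are NULL here because the struct holds no VALUEs; weakmap
 * supplies wmap_mark/wmap_compact in the corresponding slots. */
static const rb_data_type_t counter_type = {
    "counter",
    { NULL, counter_free, counter_memsize, NULL, },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
counter_allocate(VALUE klass)
{
    struct counter *c;
    VALUE obj = TypedData_Make_Struct(klass, struct counter, &counter_type, c);
    c->hits = 0;
    return obj;
}
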
#define GC_PROFILE_RECORD_DEFAULT_SIZE 100

current_process_time(struct timespec *ts)
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    static int try_clock_gettime = 1;
    if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
    try_clock_gettime = 0;

    struct rusage usage;
    if (getrusage(RUSAGE_SELF, &usage) == 0) {
        time = usage.ru_utime;
        ts->tv_sec = time.tv_sec;
        ts->tv_nsec = (int32_t)time.tv_usec * 1000;

    FILETIME creation_time, exit_time, kernel_time, user_time;
    if (GetProcessTimes(GetCurrentProcess(),
                        &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
        memcpy(&ui, &user_time, sizeof(FILETIME));
#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
        ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
        ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);

getrusage_time(void)
    if (current_process_time(&ts)) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
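
/* A standalone sketch of the same timing fallback the profiler uses: prefer
 * the high-resolution per-process CPU clock, fall back to getrusage() user
 * time. Assumes a POSIX system; independent of objspace and illustrative only. */
#include <stdio.h>
#include <time.h>
#include <sys/resource.h>

static double
process_cpu_time(void)
{
#if defined(CLOCK_PROCESS_CPUTIME_ID)
    struct timespec ts;
    if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif
    struct rusage usage;
    if (getrusage(RUSAGE_SELF, &usage) == 0) {
        return usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
    }
    return 0.0;
}

int
main(void)
{
    printf("cpu time: %f s\n", process_cpu_time());
    return 0;
}
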
gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
    if (objspace->profile.run) {
        size_t index = objspace->profile.next_index;
        objspace->profile.next_index++;

        if (!objspace->profile.records) {
            objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
            objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
        if (index >= objspace->profile.size) {
            objspace->profile.size += 1000;
            ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
            if (!ptr) rb_memerror();
            objspace->profile.records = ptr;
        if (!objspace->profile.records) {
            rb_bug("gc_profile malloc or realloc miss");
        record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
        record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
        record->allocated_size = malloc_allocated_size;
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
        struct rusage usage;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            record->maxrss = usage.ru_maxrss;
            record->minflt = usage.ru_minflt;
            record->majflt = usage.ru_majflt;
    if (gc_prof_enabled(objspace)) {
#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;
        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();

elapsed_time_from(double time)
    double now = getrusage_time();

    if (gc_prof_enabled(objspace)) {
        record->gc_time = elapsed_time_from(record->gc_invoke_time);
        record->gc_invoke_time -= objspace->profile.invoke_time;

#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();

    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);

    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
    if (gc_prof_enabled(objspace)) {
        if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
            objspace->profile.gc_sweep_start_time = getrusage_time();

    RUBY_DTRACE_GC_HOOK(SWEEP_END);
    if (gc_prof_enabled(objspace)) {
        if (record->gc_time > 0) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
            record->gc_time += sweep_time;
        else if (GC_PROFILE_MORE_DETAIL) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
        if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
        if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;

#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        record->allocate_increase = malloc_increase;
        record->allocate_limit = malloc_limit;

    if (gc_prof_enabled(objspace)) {
        size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
        size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
#if GC_PROFILE_MORE_DETAIL
        record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
        record->heap_live_objects = live;
        record->heap_free_objects = total - live;
        record->heap_total_objects = total;
        record->heap_use_size = live * sizeof(RVALUE);
        record->heap_total_size = total * sizeof(RVALUE);
gc_profile_clear(VALUE _)
    void *p = objspace->profile.records;
    objspace->profile.records = NULL;
    objspace->profile.size = 0;
    objspace->profile.next_index = 0;
    objspace->profile.current_record = 0;

gc_profile_record_get(VALUE _)
    VALUE gc_profile = rb_ary_new();
    if (!objspace->profile.run) {
    for (i = 0; i < objspace->profile.next_index; i++) {
        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
#if RGENGC_PROFILE > 0
        rb_ary_push(gc_profile, prof);
#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

gc_profile_dump_major_reason(unsigned int flags, char *buff)
    unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
    if (reason == GPR_FLAG_NONE) {
    if (reason & GPR_FLAG_MAJOR_BY_##x) { \
        buff[i++] = #x[0]; \
        if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \

#if RGENGC_ESTIMATE_OLDMALLOC
    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
    if (objspace->profile.run && count) {
        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
#if GC_PROFILE_MORE_DETAIL
        const char *str = "\n\n" \
            "Prepare Time = Previously GC's rest sweep time\n"
            "Index Flags Allocate Inc. Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
            " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
            " OldgenObj RemNormObj RemShadObj"
#if GC_PROFILE_DETAIL_MEMORY
            " MaxRSS(KB) MinorFLT MajorFLT"

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
#if CALC_EXACT_MALLOC_SIZE
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if GC_PROFILE_DETAIL_MEMORY
                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                                   record->allocated_size,
                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,
                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects
                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#if GC_PROFILE_DETAIL_MEMORY
                                   record->maxrss / 1024,
gc_profile_result(VALUE _)

gc_profile_report(int argc, VALUE *argv, VALUE self)

gc_profile_total_time(VALUE self)
    if (objspace->profile.run && objspace->profile.next_index > 0) {
        size_t count = objspace->profile.next_index;
        for (i = 0; i < count; i++) {
            time += objspace->profile.records[i].gc_time;

gc_profile_enable_get(VALUE self)
    return RBOOL(objspace->profile.run);

gc_profile_enable(VALUE _)
    objspace->profile.run = TRUE;
    objspace->profile.current_record = 0;

gc_profile_disable(VALUE _)
    objspace->profile.run = FALSE;
    objspace->profile.current_record = 0;
#define TYPE_NAME(t) case (t): return #t;
    if (obj && rb_objspace_data_type_name(obj)) {
        return rb_objspace_data_type_name(obj);

obj_type_name(VALUE obj)
    return type_name(TYPE(obj), obj);

rb_method_type_name(rb_method_type_t type)
    case VM_METHOD_TYPE_ISEQ:           return "iseq";
    case VM_METHOD_TYPE_ATTRSET:        return "attrset";
    case VM_METHOD_TYPE_IVAR:           return "ivar";
    case VM_METHOD_TYPE_BMETHOD:        return "bmethod";
    case VM_METHOD_TYPE_ALIAS:          return "alias";
    case VM_METHOD_TYPE_REFINED:        return "refined";
    case VM_METHOD_TYPE_CFUNC:          return "cfunc";
    case VM_METHOD_TYPE_ZSUPER:         return "zsuper";
    case VM_METHOD_TYPE_MISSING:        return "missing";
    case VM_METHOD_TYPE_OPTIMIZED:      return "optimized";
    case VM_METHOD_TYPE_UNDEF:          return "undef";
    case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
    if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label &&
        !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
        VALUE path = rb_iseq_path(iseq);
        int n = ISEQ_BODY(iseq)->location.first_lineno;
        snprintf(buff, buff_size, " %s@%s:%d",

str_len_no_raise(VALUE str)
    if (len < 0) return 0;
    if (len > INT_MAX) return INT_MAX;

#define BUFF_ARGS buff + pos, buff_size - pos
#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
#define APPEND_S(s) do { \
        if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
        memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
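
/* BUFF_ARGS/APPEND_F/APPEND_S implement a bounded "append into a fixed buffer
 * and bail out when full" pattern. A self-contained sketch of the same idea
 * without the macros; the function and arguments below are illustrative only. */
#include <stdio.h>

static size_t
describe(char *buff, size_t buff_size, const char *name, int id)
{
    size_t pos = 0;
    int n;

    n = snprintf(buff + pos, buff_size - pos, "%s", name);
    if (n < 0 || (pos += (size_t)n) >= buff_size) goto end;

    n = snprintf(buff + pos, buff_size - pos, " id:%d", id);
    if (n < 0 || (pos += (size_t)n) >= buff_size) goto end;

  end:
    return pos; /* like the macros' `pos`, may exceed buff_size on truncation */
}

int
main(void)
{
    char buff[16];
    size_t pos = describe(buff, sizeof(buff), "T_STRING", 42);
    printf("%s (pos=%zu)\n", buff, pos);
    return 0;
}
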
rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
    APPEND_F("%s", obj_type_name(obj));
    const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

    if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
        APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
                 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
                 C(RVALUE_MARK_BITMAP(obj), "M"),
                 C(RVALUE_PIN_BITMAP(obj), "P"),
                 C(RVALUE_MARKING_BITMAP(obj), "R"),
                 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
                 C(rb_objspace_garbage_object_p(obj), "G"),
                 obj_type_name(obj));
        APPEND_F("%p [%dXXXX] %s",
                 obj_type_name(obj));
    if (internal_object_p(obj)) {
    else if (RBASIC(obj)->klass == 0) {
        APPEND_S("(temporary internal)");
    if (!NIL_P(class_path)) {
    APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
    UNEXPECTED_NODE(rb_raw_obj_info);

    if (ARY_SHARED_P(obj)) {
        APPEND_S("shared -> ");
        rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
    else if (ARY_EMBED_P(obj)) {
        APPEND_F("[%s%s] len: %ld (embed)",
                 C(ARY_EMBED_P(obj), "E"),
                 C(ARY_SHARED_P(obj), "S"),
        APPEND_F("[%s%s%s] len: %ld, capa:%ld ptr:%p",
                 C(ARY_EMBED_P(obj), "E"),
                 C(ARY_SHARED_P(obj), "S"),
                 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,

    if (STR_SHARED_P(obj)) {
        APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
    if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
    APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));

    VALUE fstr = RSYMBOL(obj)->fstr;
    ID id = RSYMBOL(obj)->id;
    APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
    APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);

    APPEND_F("-> %p", (void*)rb_gc_location(obj));

    APPEND_F("[%c%c] %"PRIdSIZE,
             RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
             RHASH_TRANSIENT_P(obj) ? 'T' : ' ',

    if (!NIL_P(class_path)) {
    APPEND_S("(anon)");
    if (!NIL_P(class_path)) {

    uint32_t len = ROBJECT_IV_CAPACITY(obj);
    if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
        APPEND_F("(embed) len:%d", len);
        APPEND_F("len:%d ptr:%p", len, (void *)ptr);

        (block = vm_proc_block(obj)) != NULL &&
        (vm_block_type(block) == block_type_iseq) &&
        (iseq = vm_block_iseq(block)) != NULL) {
        rb_raw_iseq_info(BUFF_ARGS, iseq);
    else if (rb_ractor_p(obj)) {
        APPEND_F("r:%d", r->pub.id);
        const char *const type_name = rb_objspace_data_type_name(obj);
        APPEND_F("%s", type_name);

    APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
    switch (imemo_type(obj)) {
        APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
                 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ?  "pub" :
                 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
                 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
                 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
                 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
                 me->def ? rb_method_type_name(me->def->type) : "NULL",
                 me->def ? me->def->aliased : -1,
                 (void *)me->defined_class);
        switch (me->def->type) {
          case VM_METHOD_TYPE_ISEQ:
            APPEND_S(" (iseq:");
            rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);

        rb_raw_iseq_info(BUFF_ARGS, iseq);
      case imemo_callinfo:
        APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
                 vm_ci_kwarg(ci) ? "available" : "NULL");
      case imemo_callcache:
        APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
                 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
                 cme ? rb_id2name(cme->called_id) : "<NULL>",
                 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
                 (void *)vm_cc_call(cc));
rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
    asan_unpoisoning_object(obj) {
        size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
        pos = rb_raw_obj_info_buitin_type(buff, buff_size, obj, pos);
        if (pos >= buff_size) {}

#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
    if (UNLIKELY(oldval >= maxval - 1)) {

    rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
    char *const buff = obj_info_buffers[index];
    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);

    return obj_type_name(obj);
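
/* obj_info() hands out one of OBJ_INFO_BUFFERS_NUM static buffers in
 * round-robin order via an atomically incremented, wrapping index. A minimal
 * sketch of that pattern with C11 atomics and a simple modulo instead of
 * atomic_inc_wraparound(); illustrative only, not the gc.c implementation. */
#include <stdatomic.h>
#include <stdio.h>

#define BUFFERS_NUM  10
#define BUFFERS_SIZE 0x100

static char buffers[BUFFERS_NUM][BUFFERS_SIZE];
static atomic_uint buffers_index;

/* Concurrent callers can still stomp on a buffer once it is reused, which is
 * acceptable for debug-only output such as obj_info(). */
static char *
next_buffer(void)
{
    unsigned int idx = atomic_fetch_add(&buffers_index, 1u) % BUFFERS_NUM;
    return buffers[idx];
}

int
main(void)
{
    char *a = next_buffer();
    char *b = next_buffer();
    snprintf(a, BUFFERS_SIZE, "first");
    snprintf(b, BUFFERS_SIZE, "second");
    printf("%s %s\n", a, b);
    return 0;
}
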
MJIT_FUNC_EXPORTED const char *
rb_obj_info(VALUE obj)
    return obj_info(obj);

rb_obj_info_dump(VALUE obj)
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));

MJIT_FUNC_EXPORTED void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
rb_gcdebug_print_obj_condition(VALUE obj)
    fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
    fprintf(stderr, "moved?: true\n");
    fprintf(stderr, "moved?: false\n");
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
        fprintf(stderr, "pointer to heap?: false\n");
    fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
    fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
    if (is_lazy_sweeping(objspace)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
        fprintf(stderr, "lazy sweeping?: false\n");

    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);

rb_gcdebug_sentinel(VALUE obj, const char *name)
#if GC_DEBUG_STRESS_TO_CLASS
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
    if (!stress_to_class) {
        stress_to_class = rb_ary_hidden_new(argc);
    rb_ary_cat(stress_to_class, argv, argc);

rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
        stress_to_class = 0;
gc_using_rvargc_p(VALUE mod)

    VALUE rb_mObjSpace;
    VALUE rb_mProfiler;
    VALUE gc_constants;

    gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")),
                 LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));

    rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");

#if MALLOC_ALLOCATED_SIZE
    if (GC_COMPACTION_SUPPORTED) {
#if GC_DEBUG_STRESS_TO_CLASS

#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
    OPT(RGENGC_CHECK_MODE);
    OPT(RGENGC_PROFILE);
    OPT(RGENGC_ESTIMATE_OLDMALLOC);
    OPT(GC_PROFILE_MORE_DETAIL);
    OPT(GC_ENABLE_LAZY_SWEEP);
    OPT(CALC_EXACT_MALLOC_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE_CHECK);
    OPT(GC_PROFILE_DETAIL_MEMORY);
    OPT(GC_COMPACTION_SUPPORTED);
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2

ruby_xmalloc(size_t size)
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
    return ruby_xmalloc_body(size);

ruby_xmalloc2(size_t n, size_t size)
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
    return ruby_xmalloc2_body(n, size);

ruby_xcalloc(size_t n, size_t size)
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
    return ruby_xcalloc_body(n, size);

ruby_xrealloc(void *ptr, size_t new_size)
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
    return ruby_xrealloc_body(ptr, new_size);

ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
    return ruby_xrealloc2_body(ptr, n, new_size);
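
/* When USE_GC_MALLOC_OBJ_INFO_DETAILS is enabled, these wrappers record a
 * file/line into globals before delegating to the *_body functions. The
 * general capture technique, in a self-contained and illustrative form (the
 * names below are stand-ins, not the gc.c globals): */
#include <stdio.h>
#include <stdlib.h>

static const char *malloc_info_file = "";
static int         malloc_info_line = 0;

static void *
traced_malloc_body(size_t size)
{
    fprintf(stderr, "malloc(%zu) requested at %s:%d\n",
            size, malloc_info_file, malloc_info_line);
    return malloc(size);
}

/* The macro records the caller's location, then calls the out-of-line body,
 * mirroring the ruby_xmalloc() / ruby_xmalloc_body() split above. */
#define traced_malloc(size) \
    (malloc_info_file = __FILE__, malloc_info_line = __LINE__, \
     traced_malloc_body(size))

int
main(void)
{
    void *p = traced_malloc(64);
    free(p);
    return 0;
}
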