#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc
#include <sys/types.h>
#undef rb_data_object_wrap
#ifndef HAVE_MALLOC_USABLE_SIZE
# define HAVE_MALLOC_USABLE_SIZE
# define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
# define HAVE_MALLOC_USABLE_SIZE
# define malloc_usable_size(a) malloc_size(a)
#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
# include RUBY_ALTERNATIVE_MALLOC_HEADER
# elif defined(HAVE_MALLOC_NP_H)
# include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
# include <malloc/malloc.h>
#ifdef HAVE_SYS_TIME_H
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#if defined _WIN32 || defined __CYGWIN__
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#if defined(_MSC_VER) && defined(_WIN64)
#pragma intrinsic(_umul128)
#elif defined(HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW)
p = __builtin_mul_overflow(x, y, &z);
#elif defined(DSIZE_T)
#elif defined(_MSC_VER) && defined(_WIN64)
unsigned __int64 dz = _umul128(x, y, &dp);
#elif defined(HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW)
p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)
struct optional t = size_mul_overflow(x, y);
struct optional t = size_mul_overflow(x, y);
struct optional u = size_mul_overflow(z, w);
size_mul_or_raise(size_t x, size_t y, VALUE exc)
struct optional t = size_mul_overflow(x, y);
return size_mul_or_raise(x, y, exc);
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
struct optional t = size_mul_add_overflow(x, y, z);
return size_mul_add_or_raise(x, y, z, exc);
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
struct optional t = size_mul_add_mul_overflow(x, y, z, w);
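/*
 * Illustrative usage sketch (not part of gc.c): assuming `struct optional`
 * pairs an overflow flag (`left`) with the computed value (`right`), a
 * caller of the checked-arithmetic helpers above would look like:
 *
 *   struct optional t = size_mul_overflow(count, sizeof(RVALUE));
 *   if (t.left) rb_raise(rb_eArgError, "size overflow");  // hypothetical caller
 *   void *mem = malloc(t.right);
 *
 * size_mul_or_raise() wraps exactly this pattern, raising `exc` instead of
 * returning when the multiplication overflows.
 */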
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS 4096
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024)
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024)
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024)
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024)
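/*
 * Worked example (illustrative): with the defaults above, the malloc limit
 * starts at GC_MALLOC_LIMIT_MIN (16MB); each time allocation pressure
 * triggers a GC, the limit is scaled by GC_MALLOC_LIMIT_GROWTH_FACTOR and
 * clamped to GC_MALLOC_LIMIT_MAX:
 *
 *   size_t next_limit = (size_t)(malloc_limit * 1.4);
 *   if (next_limit > GC_MALLOC_LIMIT_MAX) next_limit = GC_MALLOC_LIMIT_MAX;
 *
 * so the default sequence is 16MB -> ~22.4MB -> ~31.4MB -> 32MB (capped).
 */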
#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0
#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
#define RGENGC_DEBUG -1
#define RGENGC_DEBUG 0
#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
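/*
 * Note (sketch): because of the RGENGC_CHECK_MODE > 0 guard above,
 * GC_ASSERT() is a no-op in normal builds, so invariants can stay in hot
 * paths at zero cost, e.g.:
 *
 *   GC_ASSERT(RVALUE_MARKED(obj));  // evaluated only in debug builds
 */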
#ifndef RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK 0
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0
#define RGENGC_DEBUG 0
#ifdef RGENGC_CHECK_MODE
#undef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#define RGENGC_PROFILE 0
#define RGENGC_ESTIMATE_OLDMALLOC 0
#define RGENGC_FORCE_MAJOR_GC 0
#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#ifndef GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#define MALLOC_ALLOCATED_SIZE 0
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0
#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS 0
#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
#if RGENGC_ESTIMATE_OLDMALLOC
#if GC_PROFILE_MORE_DETAIL
double gc_sweep_time;
size_t heap_use_pages;
size_t heap_live_objects;
size_t heap_free_objects;
size_t allocate_increase;
size_t allocate_limit;
size_t removing_objects;
size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY
#if MALLOC_ALLOCATED_SIZE
size_t allocated_size;
#if RGENGC_PROFILE > 0
size_t remembered_normal_objects;
size_t remembered_shady_objects;
#if defined(_MSC_VER) || defined(__CYGWIN__)
#pragma pack(push, 1)
#if defined(_MSC_VER) || defined(__CYGWIN__)
#define popcount_bits rb_popcount_intptr
#define STACK_CHUNK_SIZE 500
#if GC_ENABLE_INCREMENTAL_MARK
#if MALLOC_ALLOCATED_SIZE
size_t allocated_size;
#if GC_ENABLE_INCREMENTAL_MARK
#if GC_PROFILE_MORE_DETAIL
#if RGENGC_PROFILE > 0
size_t total_generated_normal_object_count;
size_t total_generated_shady_object_count;
size_t total_shade_operation_count;
size_t total_promoted_count;
size_t total_remembered_normal_object_count;
size_t total_remembered_shady_object_count;
#if RGENGC_PROFILE >= 2
size_t generated_normal_object_count_types[RUBY_T_MASK];
size_t generated_shady_object_count_types[RUBY_T_MASK];
size_t remembered_normal_object_count_types[RUBY_T_MASK];
size_t remembered_shady_object_count_types[RUBY_T_MASK];
#if RGENGC_ESTIMATE_OLDMALLOC
#if RGENGC_CHECK_MODE >= 2
#if GC_ENABLE_INCREMENTAL_MARK
#if GC_DEBUG_STRESS_TO_CLASS
#define HEAP_PAGE_ALIGN_LOG 14
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH)
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
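/*
 * Illustrative decomposition (not part of gc.c): heap pages are aligned to
 * 1 << HEAP_PAGE_ALIGN_LOG bytes, so the bitmap macros above can recover an
 * object's bit position from its address alone.  For an RVALUE pointer p:
 *
 *   bits_t slot = NUM_IN_PAGE(p);    // slot index within its page
 *   bits_t word = BITMAP_INDEX(p);   // which bits_t word holds the bit
 *   bits_t mask = BITMAP_BIT(p);     // single-bit mask within that word
 *   int marked = MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p) != 0;
 *
 * MARK_IN_BITMAP() and CLEAR_IN_BITMAP() set and clear the same bit.
 */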
#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)
#define ruby_initial_gc_stress gc_params.gc_stress
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
#define heap_allocated_pages objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length objspace->heap_pages.sorted_length
#define heap_pages_lomem objspace->heap_pages.range[0]
#define heap_pages_himem objspace->heap_pages.range[1]
#define heap_allocatable_pages objspace->heap_pages.allocatable_pages
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heap_eden (&objspace->eden_heap)
#define heap_tomb (&objspace->tomb_heap)
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
#define global_list objspace->global_list
#define ruby_gc_stressful objspace->flags.gc_stressful
#define ruby_gc_stress_mode objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class objspace->stress_to_class
#define stress_to_class 0
gc_mode_verify(enum gc_mode mode)
#if RGENGC_CHECK_MODE > 0
rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
#define is_full_marking(objspace) TRUE
#if GC_ENABLE_INCREMENTAL_MARK
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define is_incremental_marking(objspace) FALSE
#if GC_ENABLE_INCREMENTAL_MARK
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#define will_be_incremental_marking(objspace) FALSE
#define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
#define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
# error not supported
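/*
 * Sketch of the encoding above (illustrative): when long is pointer-sized,
 * an object id is the RVALUE address with FIXNUM_FLAG or'ed in.  RVALUE
 * alignment keeps the low bit clear, so the mapping is reversible:
 *
 *   VALUE id  = nonspecial_obj_id(obj);  // obj | FIXNUM_FLAG
 *   VALUE ref = obj_id_to_ref(id);       // id ^ FIXNUM_FLAG == obj
 */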
#define RANY(o) ((RVALUE*)(o))
#define RZOMBIE(o) ((struct RZombie *)(o))
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
NORETURN(static void negative_size_allocation_error(const char *));
static inline void gc_enter(rb_objspace_t *objspace, const char *event);
static inline void gc_exit(rb_objspace_t *objspace, const char *event);
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static void gc_marks_step(rb_objspace_t *objspace, int slots);
static void shrink_stack_chunk_cache(mark_stack_t *stack);
static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
static inline void gc_prof_set_malloc_info(rb_objspace_t *);
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
        *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
static const char *obj_info(VALUE obj);
#define PUSH_MARK_FUNC_DATA(v) do { \
    struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \
    objspace->mark_func_data = (v);
#define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)
#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static inline tick_t
unsigned long long int x;
__asm__ __volatile__ ("rdtsc" : "=A" (x));
#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
unsigned long hi, lo;
__asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
unsigned long long val = __builtin_ppc_get_timebase();
#elif defined(_WIN32) && defined(_MSC_VER)
typedef unsigned __int64 tick_t;
#define PRItick "llu"
static inline tick_t
#define PRItick "llu"
static inline tick_t
#elif TICK_TYPE == 2
typedef double tick_t;
#define PRItick "4.9f"
static inline tick_t
return getrusage_time();
#error "choose tick type"
#define MEASURE_LINE(expr) do { \
    volatile tick_t start_time = tick(); \
    volatile tick_t end_time; \
    end_time = tick(); \
    fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
#define MEASURE_LINE(expr) expr
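/*
 * Usage sketch (illustrative): wrapping a statement in MEASURE_LINE()
 * prints its cost in ticks to stderr when PRINT_MEASURE_LINE is enabled,
 * and expands to the bare statement otherwise:
 *
 *   MEASURE_LINE(gc_sweep_step(objspace, heap));
 */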
#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
#define RVALUE_OLD_AGE 3
#define RVALUE_AGE_SHIFT 5
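/*
 * Note (assumed layout): an object's age is a two-bit counter kept in the
 * FL_PROMOTED flag bits starting at RVALUE_AGE_SHIFT.  Once the age reaches
 * RVALUE_OLD_AGE (3) the object is treated as old generation:
 *
 *   int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
 *   int old_p = (age == RVALUE_OLD_AGE);
 */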
RVALUE_FLAGS_AGE(VALUE flags)
check_rvalue_consistency_force(const VALUE obj, int terminate)
fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
else if (!is_pointer_to_heap(objspace, (void *)obj)) {
fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
        (void *)obj, (void *)page);
fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
if (age > 0 && wb_unprotected_bit) {
fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
        obj_info(obj), age);
fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
        obj_info(obj), age);
fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
if (err > 0 && terminate) {
rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
#if RGENGC_CHECK_MODE == 0
check_rvalue_consistency(const VALUE obj)
check_rvalue_consistency(const VALUE obj)
check_rvalue_consistency_force(obj, TRUE);
void *poisoned = asan_poisoned_object_p(obj);
asan_unpoison_object(obj, false);
asan_poison_object(obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
return RVALUE_OLD_P_RAW(obj);
#if RGENGC_CHECK_MODE || GC_DEBUG
check_rvalue_consistency(obj);
#if RGENGC_PROFILE >= 2
objspace->profile.total_promoted_count++;
int age = RVALUE_FLAGS_AGE(flags);
rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
RVALUE_DEMOTE_RAW(objspace, obj);
if (RVALUE_MARKED(obj)) {
check_rvalue_consistency(obj);
check_rvalue_consistency(obj);
RVALUE_AGE_RESET_RAW(obj);
check_rvalue_consistency(obj);
return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
return RVALUE_MARKED(obj) == FALSE;
static inline void *
rb_bug("lazy sweeping underway when freeing object space");
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
heap_pages_expand_sorted_to(objspace, next_length);
heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
heap_pages_expand_sorted(objspace);
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
asan_poison_object(obj);
gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
#if GC_ENABLE_INCREMENTAL_MARK
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
static void rb_aligned_free(void *ptr);
heap_unlink_page(objspace, heap_tomb, page);
heap_page_free(objspace, page);
if (page_body == 0) {
page = calloc1(sizeof(struct heap_page));
rb_aligned_free(page_body);
end = start + limit;
mid = (lo + hi) / 2;
for (p = start; p != end; p++) {
gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
heap_page_add_freeobj(objspace, page, (VALUE)p);
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
heap_unlink_page(objspace, heap_tomb, page);
const char *method = "recycle";
page = heap_page_resurrect(objspace);
page = heap_page_allocate(objspace);
method = "allocate";
if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_allocated_pages: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
struct heap_page *page = heap_page_create(objspace);
heap_add_page(objspace, heap, page);
heap_add_freepage(heap, page);
heap_allocatable_pages_set(objspace, add);
for (i = 0; i < add; i++) {
heap_assign_page(objspace, heap);
if (goal_ratio == 0.0) {
if (f < 1.0) f = 1.1;
next_used = (size_t)(f * used);
" G(%1.2f), f(%1.2f),"
goal_ratio, f, used, next_used);
if (next_used > max_used) next_used = max_used;
return next_used - used;
heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
size_t next_used_limit = used + additional_pages;
heap_allocatable_pages_set(objspace, next_used_limit - used);
gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",
heap_assign_page(objspace, heap);
gc_sweep_continue(objspace, heap);
gc_marks_continue(objspace, heap);
heap_prepare(objspace, heap);
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
asan_unpoison_object((VALUE)p, true);
asan_unpoison_object((VALUE)p, true);
asan_unpoison_object((VALUE)p, true);
p = heap_get_freeobj_from_next_freepage(objspace, heap);
if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
#define gc_event_hook(objspace, event, data) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
#if !__has_feature(memory_sanitizer)
#if RGENGC_CHECK_MODE
if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
#if RGENGC_OLD_NEWOBJ_CHECK > 0
if (--newobj_cnt == 0) {
gc_mark_set(objspace, obj);
RVALUE_AGE_SET_OLD(objspace, obj);
check_rvalue_consistency(obj);
rb_bug("object allocation during garbage collection phase");
#if GC_DEBUG_STRESS_TO_CLASS
for (i = 0; i < cnt; ++i) {
return wb_protected ?
#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
#undef rb_data_object_alloc
#undef rb_data_typed_object_alloc
if (ptr && type->function.dsize) {
return type->function.dsize(ptr);
register size_t hi, lo, mid;
mid = (lo + hi) / 2;
if (page->start <= p) {
free_const_entry_i(VALUE value, void *data)
rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
rb_bug("obj_free() called for broken object");
obj_free_object_id(objspace, obj);
#if RGENGC_CHECK_MODE
#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
CHECK(RVALUE_WB_UNPROTECTED);
CHECK(RVALUE_MARKED);
CHECK(RVALUE_MARKING);
CHECK(RVALUE_UNCOLLECTIBLE);
#if USE_DEBUG_COUNTER
if (RANY(obj)->as.regexp.ptr) {
int free_immediately = FALSE;
void (*dfree)(void *);
dfree = RANY(obj)->as.typeddata.type->function.dfree;
if (0 && free_immediately == 0) {
dfree = RANY(obj)->as.data.dfree;
else if (free_immediately) {
make_zombie(objspace, obj, dfree, data);
if (RANY(obj)->as.match.rmatch) {
#if USE_DEBUG_COUNTER
if (RANY(obj)->as.file.fptr) {
make_io_zombie(objspace, obj);
make_zombie(objspace, obj, 0, 0);
#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
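/*
 * Sketch (illustrative): object ids are handed out from a monotonically
 * increasing counter rather than derived from the object's address; the
 * counter (called next_object_id here for illustration) starts at
 * OBJ_ID_INITIAL and advances by OBJ_ID_INCREMENT per id:
 *
 *   id = next_object_id;
 *   next_object_id += OBJ_ID_INCREMENT;
 */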
static const struct st_hash_type object_id_hash_type = {
#if RGENGC_ESTIMATE_OLDMALLOC
static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
pstart = page->start;
if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
objspace_each_objects_protected(VALUE arg)
incremental_enable(VALUE _)
if (prev_dont_incremental) {
asan_unpoison_object(obj, false);
bool used_p = p->as.basic.flags;
if (!p->as.basic.klass) break;
if (!p->as.basic.klass) break;
if (ptr || !used_p) {
asan_poison_object(obj);
return internal_object_p(obj);
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
for (; p != pend; p++) {
if (!internal_object_p(v)) {
return os_obj_of(of);
should_be_callable(VALUE block)
should_be_finalizable(obj);
should_be_callable(block);
return define_final0(obj, block);
table = (VALUE)data;
for (i = 0; i < len; i++) {
should_be_finalizable(obj);
should_be_callable(block);
return define_final0(obj, block);
table = (VALUE)data;
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    rb_set_errinfo(saved.errinfo))
saved.cfp = ec->cfp;
for (i = saved.finished;
saved.finished = ++i) {
#undef RESTORE_FINALIZER
run_finalizer(objspace, zombie, (VALUE)table);
asan_unpoison_object(zombie, false);
next_zombie = RZOMBIE(zombie)->next;
run_final(objspace, zombie);
obj_free_object_id(objspace, zombie);
RZOMBIE(zombie)->basic.flags = 0;
heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
zombie = next_zombie;
finalize_list(objspace, zombie);
gc_finalize_deferred(void *dmy)
finalize_deferred(objspace);
rb_bug("gc_finalize_deferred_register: can't register finalizer.");
#if RGENGC_CHECK_MODE >= 2
gc_verify_internal_consistency(objspace);
finalize_deferred(objspace);
run_finalizer(objspace, curr->obj, curr->table);
gc_enter(objspace, "rb_objspace_call_finalizer");
void *poisoned = asan_poisoned_object_p((VALUE)p);
asan_unpoison_object((VALUE)p, false);
p->as.free.flags = 0;
RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
else if (RANY(p)->as.data.dfree) {
make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
if (RANY(p)->as.file.fptr) {
make_io_zombie(objspace, (VALUE)p);
asan_poison_object((VALUE)p);
gc_exit(objspace, "rb_objspace_call_finalizer");
if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
is_swept_object(objspace, ptr) ||
if (!is_garbage_object(objspace, ptr)) {
check_rvalue_consistency(obj);
return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
return is_garbage_object(objspace, obj);
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
    is_live_object(objspace, orig)) {
return id2ref(objid);
#if SIZEOF_LONG == SIZEOF_VOIDP
return get_heap_object_id(obj);
return rb_find_object_id(obj, nonspecial_obj_id_);
return rb_find_object_id(obj, cached_object_id);
obj_memsize_of(VALUE obj, int use_all_types)
rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
return obj_memsize_of(obj, TRUE);
type_sym(size_t type)
#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
for (; p < pend; p++) {
void *poisoned = asan_poisoned_object_p((VALUE)p);
asan_unpoison_object((VALUE)p, false);
asan_poison_object((VALUE)p);
gc_setup_mark_bits(struct heap_page *page)
int empty_slots = 0, freed_slots = 0, final_slots = 0;
RVALUE *p, *pend, *offset;
gc_report(2, objspace, "page_sweep: start.\n");
asan_unpoison_object((VALUE)p, false);
gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
#if USE_RGENGC && RGENGC_CHECK_MODE
if (RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
if (rgengc_remembered_sweep(objspace, (VALUE)p)) rb_bug("page_sweep: %p - remembered.", (void *)p);
if (obj_free(objspace, (VALUE)p)) {
heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));
asan_poison_object((VALUE)p);
gc_setup_mark_bits(sweep_page);
#if GC_PROFILE_MORE_DETAIL
record->removing_objects += final_slots + freed_slots;
record->empty_objects += empty_slots;
if (0) fprintf(stderr, "gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
sweep_page->free_slots = freed_slots + empty_slots;
gc_finalize_deferred_register(objspace);
gc_report(2, objspace, "page_sweep: end.\n");
return freed_slots + empty_slots;
heap_set_increment(objspace, 1);
if (!heap_increment(objspace, heap)) {
gc_mode_name(enum gc_mode mode)
default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
#if RGENGC_CHECK_MODE
switch (prev_mode) {
if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
#if GC_ENABLE_INCREMENTAL_MARK
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
gc_sweep_start_heap(objspace, heap_eden);
gc_report(1, objspace, "gc_sweep_finish\n");
gc_prof_set_heap_info(objspace);
heap_pages_free_unused_pages(objspace);
if (heap_allocatable_pages < heap_tomb->total_pages) {
heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
#if RGENGC_CHECK_MODE >= 2
gc_verify_internal_consistency(objspace);
int unlink_limit = 3;
#if GC_ENABLE_INCREMENTAL_MARK
gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
gc_report(2, objspace, "gc_sweep_step\n");
#if GC_ENABLE_LAZY_SWEEP
gc_prof_sweep_timer_start(objspace);
int free_slots = gc_page_sweep(objspace, heap, sweep_page);
heap_unlink_page(objspace, heap, sweep_page);
heap_add_page(objspace, heap_tomb, sweep_page);
#if GC_ENABLE_INCREMENTAL_MARK
if (heap_add_poolpage(objspace, heap, sweep_page)) {
heap_add_freepage(heap, sweep_page);
heap_add_freepage(heap, sweep_page);
gc_sweep_finish(objspace);
#if GC_ENABLE_LAZY_SWEEP
gc_prof_sweep_timer_stop(objspace);
gc_sweep_step(objspace, heap);
gc_enter(objspace, "sweep_continue");
gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
gc_sweep_step(objspace, heap);
gc_exit(objspace, "sweep_continue");
gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
if (immediate_sweep) {
#if !GC_ENABLE_LAZY_SWEEP
gc_prof_sweep_timer_start(objspace);
gc_sweep_start(objspace);
gc_sweep_rest(objspace);
#if !GC_ENABLE_LAZY_SWEEP
gc_prof_sweep_timer_stop(objspace);
gc_sweep_start(objspace);
gc_heap_prepare_minimum_pages(objspace, heap_eden);
stack_chunk_alloc(void)
chunk = chunk->next;
stack->cache = chunk;
chunk = stack->cache;
next = stack->cache;
next = stack_chunk_alloc();
stack->chunk = next;
add_stack_chunk_cache(stack, stack->chunk);
stack->chunk = prev;
while (chunk != NULL) {
push_mark_stack_chunk(stack);
if (is_mark_stack_empty(stack)) {
if (stack->index == 1) {
pop_mark_stack_chunk(stack);
#if GC_ENABLE_INCREMENTAL_MARK
for (i=0; i<limit; i++) {
int limit = stack->index;
if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
chunk = chunk->next;
limit = stack->limit;
rb_bug("invalid_mark_stack: unreachable");
for (i=0; i < 4; i++) {
add_stack_chunk_cache(stack, stack_chunk_alloc());
#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
#define STACK_START (ec->machine.stack_start)
#define STACK_END (ec->machine.stack_end)
#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
#ifdef __EMSCRIPTEN__
#undef STACK_GROW_DIRECTION
#define STACK_GROW_DIRECTION 1
#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                       : (size_t)(STACK_END - STACK_START + 1))
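/*
 * Worked example (illustrative): on a downward-growing stack
 * (STACK_GROW_DIRECTION < 0), STACK_START is the high address recorded at
 * thread setup and STACK_END tracks the current stack pointer, so
 *
 *   size_t depth = STACK_LENGTH;  // (size_t)(STACK_START - STACK_END)
 *
 * counts the VALUE-sized slots the conservative marker has to scan.
 */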
#if !STACK_GROW_DIRECTION
#define PREVENT_STACK_OVERFLOW 1
#ifndef PREVENT_STACK_OVERFLOW
#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
# define PREVENT_STACK_OVERFLOW 1
# define PREVENT_STACK_OVERFLOW 0
#if PREVENT_STACK_OVERFLOW
return length > maximum_length;
#define stack_check(ec, water_mark) FALSE
#define STACKFRAME_FOR_CALL_CFUNC 2048
gc_mark_maybe(objspace, v);
if (end <= start) return;
mark_locations_array(objspace, start, n);
for (i=0; i<n; i++) {
gc_mark(objspace, values[i]);
for (i=0; i<n; i++) {
gc_mark_and_pin(objspace, values[i]);
for (i=0; i<n; i++) {
gc_mark_and_pin(objspace, values[i]);
gc_mark_and_pin_stack_values(objspace, n, values);
gc_mark(objspace, (VALUE)value);
gc_mark_and_pin(objspace, (VALUE)value);
gc_mark_and_pin(objspace, (VALUE)key);
gc_mark(objspace, (VALUE)value);
gc_mark_and_pin(objspace, (VALUE)key);
gc_mark_and_pin(objspace, (VALUE)value);
gc_mark_and_pin(objspace, (VALUE)key);
gc_mark(objspace, (VALUE)value);
gc_mark(objspace, RHASH(hash)->ifnone);
switch (def->type) {
mark_method_entry_i(VALUE me, void *data)
gc_mark(objspace, me);
mark_const_entry_i(VALUE value, void *data)
gc_mark(objspace, ce->value);
gc_mark(objspace, ce->file);
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
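/*
 * Sketch (illustrative): a conservative stack scan uses GET_STACK_BOUNDS to
 * order the two ends regardless of growth direction, then treats every word
 * in between as a potential VALUE:
 *
 *   VALUE *stack_start, *stack_end;
 *   GET_STACK_BOUNDS(stack_start, stack_end, 1);
 *   mark_stack_locations(objspace, ec, stack_start, stack_end);
 *
 * gc_mark_maybe() later filters each candidate with is_pointer_to_heap(),
 * so a false positive can only pin an object, never corrupt the heap.
 */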
const VALUE *stack_start, const VALUE *stack_end);
} save_regs_gc_mark;
VALUE *stack_start, *stack_end;
memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
mark_stack_locations(objspace, ec, stack_start, stack_end);
VALUE *stack_start, *stack_end;
mark_stack_locations(objspace, ec, stack_start, stack_end);
const VALUE *stack_start, const VALUE *stack_end)
gc_mark_locations(objspace, stack_start, stack_end);
#if defined(__mc68000__)
gc_mark_locations(objspace,
                  (VALUE*)((char*)stack_start + 2),
                  (VALUE*)((char*)stack_end - 2));
if (is_pointer_to_heap(objspace, (void *)obj)) {
asan_unpoison_object(obj, false);
gc_mark_and_pin(objspace, obj);
asan_poison_object(obj);
if (RVALUE_MARKED(obj)) return 0;
#if RGENGC_PROFILE > 0
objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
if (RVALUE_WB_UNPROTECTED(obj)) {
if (gc_remember_unprotected(objspace, obj)) {
gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
if (!RVALUE_OLD_P(obj)) {
if (RVALUE_MARKED(obj)) {
gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
RVALUE_AGE_SET_OLD(objspace, obj);
if (!RVALUE_MARKING(obj)) {
gc_grey(objspace, obj);
rgengc_remember(objspace, obj);
gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
RVALUE_AGE_SET_CANDIDATE(objspace, obj);
#if RGENGC_CHECK_MODE
if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
#if GC_ENABLE_INCREMENTAL_MARK
check_rvalue_consistency(obj);
if (!RVALUE_OLD_P(obj)) {
gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
RVALUE_AGE_INC(objspace, obj);
RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
check_rvalue_consistency(obj);
rgengc_check_relation(objspace, obj);
if (!gc_mark_set(objspace, obj)) return;
gc_aging(objspace, obj);
gc_grey(objspace, obj);
if (!is_markable_object(objspace, obj)) return;
gc_pin(objspace, obj);
gc_mark_ptr(objspace, obj);
if (!is_markable_object(objspace, obj)) return;
gc_mark_ptr(objspace, obj);
if (RVALUE_OLD_P(obj)) {
gc_mark_values(objspace, (long)env->env_size, env->env);
gc_mark(objspace, (VALUE)env->iseq);
gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
#if VM_CHECK_MODE > 0
gc_mark_set_parent(objspace, obj);
rb_bug("rb_gc_mark() called for broken object");
gc_mark_imemo(objspace, obj);
gc_mark(objspace, any->as.basic.klass);
gc_mark(objspace, root);
for (i=0; i < len; i++) {
gc_mark(objspace, ptr[i]);
mark_hash(objspace, obj);
if (mark_func) (*mark_func)(ptr);
for (i = 0; i < len; i++) {
gc_mark(objspace, ptr[i]);
gc_mark(objspace, ptr[i]);
rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
#if GC_ENABLE_INCREMENTAL_MARK
size_t marked_slots_at_the_beginning = objspace->marked_slots;
size_t popped_count = 0;
while (pop_mark_stack(mstack, &obj)) {
rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
gc_mark_children(objspace, obj);
#if GC_ENABLE_INCREMENTAL_MARK
rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
if (is_mark_stack_empty(mstack)) {
shrink_stack_chunk_cache(mstack);
return gc_mark_stacked_objects(objspace, TRUE, count);
return gc_mark_stacked_objects(objspace, FALSE, 0);
#if PRINT_ROOT_TICKS
#define MAX_TICKS 0x100
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];
show_mark_ticks(void)
for (i=0; i<MAX_TICKS; i++) {
const char *category = mark_ticks_categories[i];
fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
rb_vm_t *vm = rb_ec_vm_ptr(ec);
#if PRINT_ROOT_TICKS
tick_t start_tick = tick();
const char *prev_category = 0;
if (mark_ticks_categories[0] == 0) {
if (categoryp) *categoryp = "xxx";
#if PRINT_ROOT_TICKS
#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
    if (prev_category) { \
        tick_t t = tick(); \
        mark_ticks[tick_count] = t - start_tick; \
        mark_ticks_categories[tick_count] = prev_category; \
    prev_category = category; \
    start_tick = tick(); \
#define MARK_CHECKPOINT_PRINT_TICK(category)
#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
    MARK_CHECKPOINT_PRINT_TICK(category); \
if (vm->self) gc_mark(objspace, vm->self);
mark_current_machine_context(objspace, ec);
gc_mark_maybe(objspace, *list->varptr);
#undef MARK_CHECKPOINT
#if RGENGC_CHECK_MODE >= 4
#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
static struct reflist *
struct reflist *refs = xmalloc(sizeof(struct reflist));
refs->list[0] = obj;
reflist_destruct(struct reflist *refs)
reflist_add(struct reflist *refs, VALUE obj)
if (refs->pos == refs->size) {
refs->list[refs->pos++] = obj;
reflist_dump(struct reflist *refs)
for (i=0; i<refs->pos; i++) {
if (IS_ROOTSIG(obj)) {
reflist_referred_from_machine_context(struct reflist *refs)
for (i=0; i<refs->pos; i++) {
if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
const char *category;
allrefs_add(struct allrefs *data, VALUE obj)
struct reflist *refs;
reflist_add(refs, data->root_obj);
refs = reflist_create(data->root_obj);
struct allrefs *data = (struct allrefs *)ptr;
if (allrefs_add(data, obj)) {
push_mark_stack(&data->mark_stack, obj);
struct allrefs *data = (struct allrefs *)ptr;
data->root_obj = MAKE_ROOTSIG(data->category);
if (allrefs_add(data, obj)) {
push_mark_stack(&data->mark_stack, obj);
struct allrefs data;
struct mark_func_data_struct mfd;
data.objspace = objspace;
init_mark_stack(&data.mark_stack);
mfd.mark_func = allrefs_roots_i;
gc_mark_roots(objspace, &data.category);
while (pop_mark_stack(&data.mark_stack, &obj)) {
free_stack_chunks(&data.mark_stack);
return data.references;
struct reflist *refs = (struct reflist *)value;
reflist_destruct(refs);
objspace_allrefs_destruct(struct st_table *refs)
st_foreach(refs, objspace_allrefs_destruct_i, 0);
#if RGENGC_CHECK_MODE >= 5
struct reflist *refs = (struct reflist *)v;
fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
struct reflist *refs = (struct reflist *)v;
fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
if (reflist_referred_from_machine_context(refs)) {
objspace->rgengc.error_count++;
#if RGENGC_ESTIMATE_OLDMALLOC
objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
if (objspace->rgengc.error_count > 0) {
#if RGENGC_CHECK_MODE >= 5
allrefs_dump(objspace);
if (checker_name) rb_bug("%s: GC has problem.", checker_name);
objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
objspace->rgengc.allrefs_table = 0;
#if RGENGC_ESTIMATE_OLDMALLOC
check_generation_i(const VALUE child, void *ptr)
if (!RVALUE_OLD_P(child)) {
if (!RVALUE_REMEMBERED(parent) &&
    !RVALUE_REMEMBERED(child) &&
    !RVALUE_UNCOLLECTIBLE(child)) {
fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
check_color_i(const VALUE child, void *ptr)
if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
        obj_info(parent), obj_info(child));
check_children_i(const VALUE child, void *ptr)
if (check_rvalue_consistency_force(child, FALSE) != 0) {
fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
        obj_info(child), obj_info(data->parent));
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
void *poisoned = asan_poisoned_object_p(obj);
asan_unpoison_object(obj, false);
if (RVALUE_BLACK_P(obj)) {
asan_poison_object(obj);
unsigned int has_remembered_shady = FALSE;
unsigned int has_remembered_old = FALSE;
int remembered_old_objects = 0;
int free_objects = 0;
int zombie_objects = 0;
void *poisoned = asan_poisoned_object_p(val);
asan_unpoison_object(val, false);
if (RBASIC(val) == 0) free_objects++;
has_remembered_shady = TRUE;
has_remembered_old = TRUE;
remembered_old_objects++;
asan_poison_object(val);
rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
       (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
       (void *)page, obj ? obj_info(obj) : "");
rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);
rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);
return remembered_old_objects;
int remembered_old_objects = 0;
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
asan_unpoison_object((VALUE)p, false);
fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info((VALUE)p));
asan_poison_object((VALUE)prev);
remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
return remembered_old_objects;
int remembered_old_objects = 0;
remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
return remembered_old_objects;
gc_verify_internal_consistency_m(VALUE dummy)
objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
#if RGENGC_CHECK_MODE >= 5
rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
size_t list_count = 0;
rb_bug("inconsistent finalizing object count:\n"
       " heap_pages_deferred_final list has %"PRIuSIZE" items.",
gc_verify_transient_heap_internal_consistency(VALUE dmy)
#if GC_ENABLE_INCREMENTAL_MARK
if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
#if GC_ENABLE_INCREMENTAL_MARK
gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
gc_mark_children(objspace, (VALUE)p);
gc_mark_stacked_objects_all(objspace);
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
heap_add_freepage(heap, page);
#if GC_ENABLE_INCREMENTAL_MARK
heap_move_pooled_pages_to_free_pages(heap_eden);
gc_report(1, objspace, "gc_marks_finish: pooled pages exist. retry.\n");
rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
gc_mark_roots(objspace, 0);
gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));
#if RGENGC_CHECK_MODE >= 2
if (gc_verify_heap_pages(objspace) != 0) {
rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
gc_marks_wb_unprotected_objects(objspace);
#if RGENGC_CHECK_MODE >= 2
gc_verify_internal_consistency(objspace);
#if RGENGC_CHECK_MODE >= 4
gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
if (sweep_slots > max_free_slots) {
if (sweep_slots < min_free_slots) {
if (!full_marking) {
full_marking = TRUE;
gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
heap_increment(objspace, heap);
gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
if (sweep_slots < min_free_slots) {
gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
heap_increment(objspace, heap);
#if GC_ENABLE_INCREMENTAL_MARK
if (gc_mark_stacked_objects_incremental(objspace, slots)) {
if (gc_marks_finish(objspace)) {
gc_report(1, objspace, "gc_marks_rest\n");
#if GC_ENABLE_INCREMENTAL_MARK
while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
} while (gc_marks_finish(objspace) == FALSE);
gc_mark_stacked_objects_all(objspace);
gc_marks_finish(objspace);
#if GC_ENABLE_INCREMENTAL_MARK
gc_enter(objspace, "marks_continue");
struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
from = "pooled-pages";
else if (heap_increment(objspace, heap)) {
from = "incremented-pages";
gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
gc_marks_rest(objspace);
gc_exit(objspace, "marks_continue");
gc_prof_mark_timer_start(objspace);
gc_marks_start(objspace, full_mark);
gc_marks_rest(objspace);
#if RGENGC_PROFILE > 0
gc_marks_start(objspace, TRUE);
gc_marks_rest(objspace);
gc_prof_mark_timer_stop(objspace);
const char *status = " ";
return RVALUE_REMEMBERED(obj);
gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
check_rvalue_consistency(obj);
if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
#if RGENGC_PROFILE > 0
if (!rgengc_remembered(objspace, obj)) {
if (RVALUE_WB_UNPROTECTED(obj) == 0) {
objspace->profile.total_remembered_normal_object_count++;
#if RGENGC_PROFILE >= 2
return rgengc_remembersetbits_set(objspace, obj);
int result = rgengc_remembersetbits_get(objspace, obj);
check_rvalue_consistency(obj);
gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
return rgengc_remembered_sweep(objspace, obj);
#ifndef PROFILE_REMEMBERSET_MARK
#define PROFILE_REMEMBERSET_MARK 0
#if PROFILE_REMEMBERSET_MARK
int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
#if PROFILE_REMEMBERSET_MARK
gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
gc_mark_children(objspace, obj);
#if PROFILE_REMEMBERSET_MARK
#if PROFILE_REMEMBERSET_MARK
fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
6746 if ( RVALUE_OLD_P(b))
rb_bug(
"gc_writebarrier_generational: %s is an old object.", obj_info(b));
6747 if (
is_incremental_marking(objspace))
rb_bug(
"gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
6752 if (!rgengc_remembered(objspace, a)) {
6753 rgengc_remember(objspace, a);
6754 gc_report(1, objspace,
"gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
6759 if (RVALUE_WB_UNPROTECTED(b)) {
6760 gc_remember_unprotected(objspace, b);
6763 RVALUE_AGE_SET_OLD(objspace, b);
6764 rgengc_remember(objspace, b);
6767 gc_report(1, objspace,
"gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
6770 check_rvalue_consistency(a);
6771 check_rvalue_consistency(b);
6774 #if GC_ENABLE_INCREMENTAL_MARK
6778 gc_mark_set_parent(objspace, parent);
6779 rgengc_check_relation(objspace,
obj);
6780 if (gc_mark_set(objspace,
obj) ==
FALSE)
return;
6781 gc_aging(objspace,
obj);
6782 gc_grey(objspace,
obj);
6790 gc_report(2, objspace,
"gc_writebarrier_incremental: [LG] %p -> %s\n", (
void *)a, obj_info(b));
6792 if (RVALUE_BLACK_P(a)) {
6793 if (RVALUE_WHITE_P(b)) {
6794 if (!RVALUE_WB_UNPROTECTED(a)) {
6795 gc_report(2, objspace,
"gc_writebarrier_incremental: [IN] %p -> %s\n", (
void *)a, obj_info(b));
6796 gc_mark_from(objspace, b, a);
6799 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
6800 if (!RVALUE_WB_UNPROTECTED(b)) {
6801 gc_report(1, objspace,
"gc_writebarrier_incremental: [GN] %p -> %s\n", (
void *)a, obj_info(b));
6802 RVALUE_AGE_SET_OLD(objspace, b);
6804 if (RVALUE_BLACK_P(b)) {
6805 gc_grey(objspace, b);
6809 gc_report(1, objspace,
"gc_writebarrier_incremental: [LL] %p -> %s\n", (
void *)a, obj_info(b));
6810 gc_remember_unprotected(objspace, b);
6816 #define gc_writebarrier_incremental(a, b, objspace)
6828 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
6832 gc_writebarrier_generational(a, b, objspace);
6836 gc_writebarrier_incremental(a, b, objspace);
6843 if (RVALUE_WB_UNPROTECTED(
obj)) {
6849 gc_report(2, objspace,
"rb_gc_writebarrier_unprotect: %s %s\n", obj_info(
obj),
6850 rgengc_remembered(objspace,
obj) ?
" (already remembered)" :
"");
6852 if (RVALUE_OLD_P(
obj)) {
6853 gc_report(1, objspace,
"rb_gc_writebarrier_unprotect: %s\n", obj_info(
obj));
6854 RVALUE_DEMOTE(objspace,
obj);
6855 gc_mark_set(objspace,
obj);
6856 gc_remember_unprotected(objspace,
obj);
6859 objspace->
profile.total_shade_operation_count++;
6860 #if RGENGC_PROFILE >= 2
6866 RVALUE_AGE_RESET(
obj);
6882 gc_report(1, objspace,
"rb_gc_writebarrier_remember: %s\n", obj_info(
obj));
6885 if (RVALUE_BLACK_P(
obj)) {
6886 gc_grey(objspace,
obj);
6890 if (RVALUE_OLD_P(
obj)) {
6891 rgengc_remember(objspace,
obj);
6896 static st_table *rgengc_unprotect_logging_table;
6906 rgengc_unprotect_logging_exit_func(
void)
6908 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
6916 if (rgengc_unprotect_logging_table == 0) {
6918 atexit(rgengc_unprotect_logging_exit_func);
6921 if (RVALUE_WB_UNPROTECTED(
obj) == 0) {
6926 snprintf(
ptr, 0x100 - 1,
"%s|%s:%d", obj_info(
obj), filename, line);
6946 if (RVALUE_WB_UNPROTECTED(
obj) && !RVALUE_WB_UNPROTECTED(dest)) {
6947 if (!RVALUE_OLD_P(dest)) {
6949 RVALUE_AGE_RESET_RAW(dest);
6952 RVALUE_DEMOTE(objspace, dest);
6956 check_rvalue_consistency(dest);
6982 static ID ID_marked;
6984 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
6988 #define I(s) ID_##s = rb_intern(#s);
7001 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n < max) flags[n++] = ID_wb_protected;
7002 if (RVALUE_OLD_P(obj) && n < max) flags[n++] = ID_old;
7003 if (RVALUE_UNCOLLECTIBLE(obj) && n < max) flags[n++] = ID_uncollectible;
7019 int is_old = RVALUE_OLD_P(obj);
7021 gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
7024 if (RVALUE_MARKED(obj)) {
7031 #if GC_ENABLE_INCREMENTAL_MARK
7045 #if GC_ENABLE_INCREMENTAL_MARK
7061 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE
7062 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024
7097 if (tmp->varptr == addr) {
7129 #define gc_stress_full_mark_after_malloc_p() \
7130 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
7136 if (!heap_increment(objspace, heap)) {
7137 heap_set_increment(objspace, 1);
7138 heap_increment(objspace, heap);
7158 gc_prof_set_malloc_info(objspace);
7189 #if RGENGC_ESTIMATE_OLDMALLOC
7226 #if GC_PROFILE_MORE_DETAIL
7227 objspace->profile.prepare_time = getrusage_time();
7232 #if GC_PROFILE_MORE_DETAIL
7233 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
7236 return gc_start(objspace, reason);
7254 #if RGENGC_CHECK_MODE >= 2
7255 gc_verify_internal_consistency(objspace);
7258 gc_enter(objspace, "gc_start");
7264 do_full_mark = TRUE;
7273 do_full_mark = TRUE;
7277 do_full_mark = TRUE;
7288 #if GC_ENABLE_INCREMENTAL_MARK
7303 gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
7307 #if USE_DEBUG_COUNTER
7315 #if RGENGC_ESTIMATE_OLDMALLOC
7332 gc_prof_setup_new_record(objspace, reason);
7333 gc_reset_malloc_info(objspace);
7339 gc_prof_timer_start(objspace);
7341 gc_marks(objspace, do_full_mark);
7343 gc_prof_timer_stop(objspace);
7345 gc_exit(objspace, "gc_start");
7355 if (marking || sweeping) {
7356 gc_enter(objspace, "gc_rest");
7362 gc_marks_rest(objspace);
7366 gc_sweep_rest(objspace);
7368 gc_exit(objspace, "gc_rest");
7385 #if GC_ENABLE_INCREMENTAL_MARK
7403 static char buff[0x10];
7404 gc_current_status_fill(objspace, buff);
7408 #if PRINT_ENTER_EXIT_TICK
7410 static tick_t last_exit_tick;
7411 static tick_t enter_tick;
7412 static int enter_count = 0;
7413 static char last_gc_status[0x10];
7416 gc_record(rb_objspace_t *objspace, int direction, const char *event)
7418 if (direction == 0) {
7420 enter_tick = tick();
7421 gc_current_status_fill(objspace, last_gc_status);
7424 tick_t exit_tick = tick();
7425 char current_gc_status[0x10];
7426 gc_current_status_fill(objspace, current_gc_status);
7429 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
7430     enter_tick - last_exit_tick,
7431     exit_tick - enter_tick,
7433     last_gc_status, current_gc_status,
7435 last_exit_tick = exit_tick;
7438 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
7440     exit_tick - enter_tick,
7442     last_gc_status, current_gc_status,
7449 gc_record(rb_objspace_t *objspace, int direction, const char *event)
7464 gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
7465 gc_record(objspace, 0, event);
7475 gc_record(objspace, 1, event);
7476 gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
7483 gc_with_gvl(void *ptr)
7572 return RVALUE_MARKED(obj) && !RVALUE_PINNED(obj);
7600 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
7601 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
7602 marking = RVALUE_MARKING((VALUE)src);
7645 if (wb_unprotected) {
7652 if (uncollectible) {
7661 src->as.moved.destination = (VALUE)dest;
7662 src->as.moved.next = moved_list;
7678 if (free->slot == free->page->start + free->page->total_slots - 1) {
7680 free->page = page_list[free->index];
7717 size_t total_pages = heap_eden->total_pages;
7718 page = page_list[0];
7723 free->objspace = objspace;
7725 page = page_list[total_pages - 1];
7726 scan->index = total_pages - 1;
7746 compare_pinned(const void *left, const void *right, void *dummy)
7751 left_page = *(struct heap_page * const *)left;
7752 right_page = *(struct heap_page * const *)right;
7758 compare_free_slots(const void *left, const void *right, void *dummy)
7763 left_page = *(struct heap_page * const *)left;
7764 right_page = *(struct heap_page * const *)right;
7774 size_t total_pages = heap_eden->total_pages;
7780 page_list[i++] = page;
7804 page_list = allocate_page_list(objspace, comparator);
7806 init_cursors(objspace, &free_cursor, &scan_cursor, page_list);
7809 while (not_met(&free_cursor, &scan_cursor)) {
7813 void *free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
7814 asan_unpoison_object((VALUE)free_cursor.slot, false);
7816 while (BUILTIN_TYPE(free_cursor.slot) != T_NONE && not_met(&free_cursor, &scan_cursor)) {
7818 if (free_slot_poison) {
7820 asan_poison_object((VALUE)free_cursor.slot);
7823 advance_cursor(&free_cursor, page_list);
7826 free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
7827 asan_unpoison_object((VALUE)free_cursor.slot, false);
7831 void *scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
7832 asan_unpoison_object((VALUE)scan_cursor.slot, false);
7837 while (!gc_is_moveable_obj(objspace, (VALUE)scan_cursor.slot) && not_met(&free_cursor, &scan_cursor)) {
7840 if (scan_slot_poison) {
7842 asan_poison_object((VALUE)scan_cursor.slot);
7845 retreat_cursor(&scan_cursor, page_list);
7848 scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
7849 asan_unpoison_object((VALUE)scan_cursor.slot, false);
7854 if (not_met(&free_cursor, &scan_cursor)) {
7861 moved_list = gc_move(objspace, (VALUE)scan_cursor.slot, (VALUE)free_cursor.slot, moved_list);
7867 advance_cursor(&free_cursor, page_list);
7868 retreat_cursor(&scan_cursor, page_list);
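The loop above is a classic two-finger compaction pass: a "free" cursor walks forward hunting empty slots, a "scan" cursor walks backward hunting movable objects, and objects are relocated until the cursors meet. A compact, self-contained sketch (hypothetical names, not CRuby's code):

#include <stddef.h>

enum { SLOT_EMPTY, SLOT_PINNED, SLOT_MOVABLE };

struct slot { int kind; int payload; };

static void two_finger_compact(struct slot *heap, size_t len)
{
    size_t free_i = 0, scan_i = len - 1;
    while (free_i < scan_i) {
        while (free_i < scan_i && heap[free_i].kind != SLOT_EMPTY) free_i++;
        while (free_i < scan_i && heap[scan_i].kind != SLOT_MOVABLE) scan_i--;
        if (free_i < scan_i) {
            heap[free_i] = heap[scan_i];      /* move object down the heap */
            heap[scan_i].kind = SLOT_EMPTY;   /* leave a hole behind; a real
                                                 collector stores a forwarding
                                                 address here instead */
            free_i++; scan_i--;
        }
    }
}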
7887 for (i = 0; i < len; i++) {
7900 for (i = 0; i < len; i++) {
7911 if (gc_object_moved_p(objspace, (VALUE)*key)) {
7915 if (gc_object_moved_p(objspace, (VALUE)*value)) {
7929 if (gc_object_moved_p(objspace, (VALUE)key)) {
7933 if (gc_object_moved_p(objspace, (VALUE)value)) {
7944 if (gc_object_moved_p(objspace, (VALUE)*value)) {
7958 if (gc_object_moved_p(objspace, (VALUE)value)) {
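Each gc_object_moved_p check above drives the same repair: a stale reference is rewritten through the forwarding address that compaction left in the moved object's old slot. A hedged sketch with hypothetical types:

struct fwd_obj { int moved; struct fwd_obj *destination; };

/* If ref was moved, its old slot now records where it went. */
static struct fwd_obj *
gc_location_sketch(struct fwd_obj *ref)
{
    return (ref && ref->moved) ? ref->destination : ref;
}

static void
update_slot(struct fwd_obj **slot)
{
    *slot = gc_location_sketch(*slot);
}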
7989 gc_update_table_refs(objspace, ptr);
8007 switch (def->type) {
8044 for (i=0; i<n; i++) {
8058 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
8082 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
8100 check_id_table_move(ID id, VALUE value, void *data)
8104 if (gc_object_moved_p(objspace, (VALUE)value)) {
8120 void *poisoned = asan_poisoned_object_p(value);
8121 asan_unpoison_object(value, false);
8128 destination = value;
8134 asan_poison_object(value);
8138 destination = value;
8145 update_id_table(ID *key, VALUE *value, void *data, int existing)
8149 if (gc_object_moved_p(objspace, (VALUE)*value)) {
8165 update_const_table(VALUE value, void *data)
8170 if (gc_object_moved_p(objspace, ce->value)) {
8174 if (gc_object_moved_p(objspace, ce->file)) {
8193 entry = entry->next;
8202 update_subclass_entries(objspace, ext->subclasses);
8210 gc_report(4, objspace, "update-refs: %p ->", (void *)obj);
8241 gc_ref_update_imemo(objspace, obj);
8257 gc_ref_update_array(objspace, obj);
8262 gc_ref_update_hash(objspace, obj);
8279 if (compact_func) (*compact_func)(ptr);
8286 gc_ref_update_object(objspace, obj);
8316 if (any->as.match.str) {
8337 for (i = 0; i < len; i++) {
8354 gc_report(4, objspace, "update-refs: %p <-", (void *)obj);
8358 gc_ref_update(void *vstart, void *vend, size_t stride, void *data)
8367 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
8374 for (; v != (VALUE)vend; v += stride) {
8376 void *poisoned = asan_poisoned_object_p(v);
8377 asan_unpoison_object(v, false);
8381 heap_page_add_freeobj(objspace, page, v);
8389 if (RVALUE_WB_UNPROTECTED(v)) {
8395 gc_update_object_references(objspace, v);
8400 asan_poison_object(v);
8410 #define global_symbols ruby_global_symbols
8416 rb_vm_t *vm = rb_ec_vm_ptr(ec);
8418 objspace_each_objects_without_setup(objspace, gc_ref_update, objspace);
8453 static void gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier);
8456 gc_compact(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8464 gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, use_verifier);
8476 return gc_compact_stats(objspace);
8480 root_obj_check_moved_i(const char *category, VALUE obj, void *data)
8488 reachable_object_check_moved_i(VALUE ref, void *data)
8492 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
8497 heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
8500 for (; v != (VALUE)vend; v += stride) {
8505 void *poisoned = asan_poisoned_object_p(v);
8506 asan_unpoison_object(v, false);
8518 asan_poison_object(v);
8529 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
8530 objspace_each_objects(objspace, heap_check_moved_i, NULL);
8535 gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8537 if (0) fprintf(stderr, "gc_compact_after_gc: %d,%d,%d\n", use_toward_empty, use_double_pages, use_verifier);
8544 gc_verify_internal_consistency(objspace);
8547 if (use_double_pages) {
8552 VALUE moved_list_head;
8555 if (use_toward_empty) {
8556 moved_list_head = gc_compact_heap(objspace, compare_free_slots);
8559 moved_list_head = gc_compact_heap(objspace, compare_pinned);
8563 gc_update_references(objspace);
8567 gc_check_references_for_moved(objspace);
8576 while (moved_list_head) {
8581 next_moved = RMOVED(moved_list_head)->next;
8584 RMOVED(moved_list_head)->flags = 0;
8585 RMOVED(moved_list_head)->destination = 0;
8586 RMOVED(moved_list_head)->next = 0;
8588 heap_page_add_freeobj(objspace, page, moved_list_head);
8592 heap_unlink_page(objspace, heap_eden, page);
8593 heap_add_page(objspace, heap_tomb, page);
8596 moved_list_head = next_moved;
8616 gc_verify_internal_consistency(objspace);
8642 int use_toward_empty = FALSE;
8643 int use_double_pages = FALSE;
8648 static ID keyword_ids[2];
8656 if (!keyword_ids[0]) {
8658 keyword_ids[1] = rb_intern("double_heap");
8663 use_toward_empty = TRUE;
8666 use_double_pages = TRUE;
8670 gc_compact(objspace, use_toward_empty, use_double_pages, TRUE);
8671 return gc_compact_stats(objspace);
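The toward_empty/double_heap keywords parsed above belong to the verification-oriented entry point; plain compaction is also reachable from a C extension through the public Ruby API. A minimal hedged sketch, assuming the GC module responds to compact in this build:

#include <ruby.h>

/* Equivalent to running `GC.compact` in Ruby; rb_mGC is the public GC module. */
static void compact_heap_now(void)
{
    rb_funcall(rb_mGC, rb_intern("compact"), 0);
}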
8686 garbage_collect(objspace, reason);
8696 #if RGENGC_PROFILE >= 2
8701 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
8706 const char *type = type_name(i, 0);
8726 gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
8728 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
8729 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
8730 #if RGENGC_ESTIMATE_OLDMALLOC
8731 static VALUE sym_oldmalloc;
8733 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
8734 static VALUE sym_none, sym_marking, sym_sweeping;
8749 if (sym_major_by == Qnil) {
8750 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
8762 #if RGENGC_ESTIMATE_OLDMALLOC
8776 #define SET(name, attr) \
8777 if (key == sym_##name) \
8779 else if (hash != Qnil) \
8780 rb_hash_aset(hash, sym_##name, (attr));
8787 #if RGENGC_ESTIMATE_OLDMALLOC
8791 SET(major_by, major_by);
8805 if (orig_flags == 0) {
8822 return gc_info_decode(objspace, key, 0);
8837 return gc_info_decode(objspace, arg, 0);
8866 #if RGENGC_ESTIMATE_OLDMALLOC
8871 gc_stat_sym_total_generated_normal_object_count,
8872 gc_stat_sym_total_generated_shady_object_count,
8873 gc_stat_sym_total_shade_operation_count,
8874 gc_stat_sym_total_promoted_count,
8875 gc_stat_sym_total_remembered_normal_object_count,
8876 gc_stat_sym_total_remembered_shady_object_count,
8902 #if RGENGC_ESTIMATE_OLDMALLOC
8911 static VALUE gc_stat_compat_table;
8914 setup_gc_stat_symbols(void)
8916 if (gc_stat_symbols[0] == 0) {
8917 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
8920 S(heap_sorted_length);
8922 S(heap_available_slots);
8925 S(heap_final_slots);
8926 S(heap_marked_slots);
8929 S(total_allocated_pages);
8930 S(total_freed_pages);
8931 S(total_allocated_objects);
8932 S(total_freed_objects);
8933 S(malloc_increase_bytes);
8934 S(malloc_increase_bytes_limit);
8939 S(remembered_wb_unprotected_objects);
8940 S(remembered_wb_unprotected_objects_limit);
8942 S(old_objects_limit);
8943 #if RGENGC_ESTIMATE_OLDMALLOC
8944 S(oldmalloc_increase_bytes);
8945 S(oldmalloc_increase_bytes_limit);
8948 S(total_generated_normal_object_count);
8949 S(total_generated_shady_object_count);
8950 S(total_shade_operation_count);
8951 S(total_promoted_count);
8952 S(total_remembered_normal_object_count);
8953 S(total_remembered_shady_object_count);
8957 #define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
8958 S(gc_stat_heap_used);
8959 S(heap_eden_page_length);
8960 S(heap_tomb_page_length);
8968 S(remembered_shady_object);
8969 S(remembered_shady_object_limit);
8971 S(old_object_limit);
8973 S(total_allocated_object);
8974 S(total_freed_object);
8977 #if RGENGC_ESTIMATE_OLDMALLOC
8978 S(oldmalloc_increase);
8989 #define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
8990 #define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
9009 #if RGENGC_ESTIMATE_OLDMALLOC
9025 if (!NIL_P(new_key)) {
9026 static int warned = 0;
9028 rb_warn("GC.stat keys were changed from Ruby 2.1. "
9030     "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
9048 if ((new_key = compat_key(key)) != Qnil) {
9056 gc_stat_internal(VALUE hash_or_sym)
9061 setup_gc_stat_symbols();
9067 static VALUE default_proc_for_compat = 0;
9068 if (default_proc_for_compat == 0) {
9069 default_proc_for_compat = rb_proc_new(default_proc_for_compat_func, Qnil);
9082 #define SET(name, attr) \
9083 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
9085 else if (hash != Qnil) \
9086 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
9095 SET(heap_available_slots, objspace_available_slots(objspace));
9096 SET(heap_live_slots, objspace_live_slots(objspace));
9097 SET(heap_free_slots, objspace_free_slots(objspace));
9116 #if RGENGC_ESTIMATE_OLDMALLOC
9122 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
9123 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
9124 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
9125 SET(total_promoted_count, objspace->profile.total_promoted_count);
9126 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
9127 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
9134 if ((new_key = compat_key(key)) != Qnil) {
9141 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
9143 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
9144 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
9145 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
9146 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
9147 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
9148 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
9162 size_t value = gc_stat_internal(arg);
9172 gc_stat_internal(arg);
9180 size_t value = gc_stat_internal(key);
9184 gc_stat_internal(key);
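gc_stat_internal backs both the Symbol and Hash forms of GC.stat; the same counters are readable from C through the public rb_gc_stat(). A small sketch (the key name is just an example):

#include <ruby.h>

/* Returns the current number of live heap slots, as GC.stat(:heap_live_slots)
 * would; rb_gc_stat() accepts a Symbol key and returns the value directly. */
static size_t live_slots(void)
{
    return rb_gc_stat(ID2SYM(rb_intern("heap_live_slots")));
}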
9207 gc_stress_set(objspace, flag);
9237 return gc_disable_no_rest(objspace);
9259 return gc_disable_no_rest(objspace);
9269 get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
9277 #if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
9292 unit = 1024*1024*1024;
9296 while (*end && isspace((unsigned char)*end)) end++;
9308 if (val > 0 && (size_t)val > lower_bound) {
9312 *default_value = (size_t)val;
9318 name, val, *default_value, lower_bound);
9327 get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
9335 if (!*ptr || *end) {
9340 if (accept_zero && val == 0.0) {
9343 else if (val <= lower_bound) {
9345 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
9346     name, val, *default_value, lower_bound);
9349 else if (upper_bound != 0.0 && val > upper_bound) {
9352 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
9353     name, val, *default_value, upper_bound);
9359 *default_value = val;
9367 gc_set_initial_pages(void)
9373 if (min_pages > heap_eden->total_pages) {
9424 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
9427 else if (get_envparam_size("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
9428 rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
9432 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
9433 gc_set_initial_pages();
9435 else if (get_envparam_size("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
9436 rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
9437 gc_set_initial_pages();
9440 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
9441 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
9450 get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
9451 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
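Each RUBY_GC_* variable above flows through get_envparam_size/get_envparam_double, which validate against a bound before overwriting the default. A stripped-down sketch of that pattern (hypothetical helper; the real parser also handles k/m/g unit suffixes, elided here):

#include <stdio.h>
#include <stdlib.h>

static int env_size_param(const char *name, size_t *out)
{
    const char *s = getenv(name);
    char *end;
    long long v;
    if (!s) return 0;
    v = strtoll(s, &end, 10);
    if (end == s || v <= 0) {
        fprintf(stderr, "%s=%s is ignored\n", name, s);  /* reject, keep default */
        return 0;
    }
    *out = (size_t)v;
    return 1;
}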
9457 #if RGENGC_ESTIMATE_OLDMALLOC
9472 if (is_markable_object(objspace, obj)) {
9473 struct mark_func_data_struct mfd;
9474 mfd.mark_func = func;
9477 gc_mark_children(objspace, obj);
9499 objspace_reachable_objects_from_root(objspace, func, passing_data);
9506 struct mark_func_data_struct mfd;
9509 data.data = passing_data;
9511 mfd.mark_func = root_objects_from;
9515 gc_mark_roots(objspace, &data.category);
9530 gc_vraise(void *ptr)
9567 negative_size_allocation_error(const char *msg)
9573 ruby_memerror_body(void *dummy)
9610 if (during_gc) gc_exit(objspace, "rb_memerror");
9634 #if defined __MINGW32__
9635 res = __mingw_aligned_malloc(size, alignment);
9636 #elif defined _WIN32
9637 void *_aligned_malloc(size_t, size_t);
9638 res = _aligned_malloc(size, alignment);
9639 #elif defined(HAVE_POSIX_MEMALIGN)
9646 #elif defined(HAVE_MEMALIGN)
9650 res = malloc(alignment + size + sizeof(void*));
9651 aligned = (char*)res + alignment + sizeof(void*);
9652 aligned -= ((VALUE)aligned & (alignment - 1));
9653 ((void**)aligned)[-1] = res;
9654 res = (void*)aligned;
9658 GC_ASSERT(((alignment - 1) & alignment) == 0);
9659 GC_ASSERT(alignment % sizeof(void*) == 0);
9664 rb_aligned_free(void *ptr)
9666 #if defined __MINGW32__
9667 __mingw_aligned_free(ptr);
9668 #elif defined _WIN32
9670 #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
9677 static inline size_t
9680 #ifdef HAVE_MALLOC_USABLE_SIZE
9681 return malloc_usable_size(ptr);
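The portable branch above over-allocates by alignment + sizeof(void*) and stashes the raw malloc pointer immediately below the aligned block; that is how rb_aligned_free's fallback (elided in this listing) can recover it. A self-contained sketch of the pair, with hypothetical names:

#include <stdint.h>
#include <stdlib.h>

static void *aligned_alloc_fallback(size_t alignment, size_t size)
{
    void *raw = malloc(alignment + size + sizeof(void *));
    char *aligned;
    if (!raw) return NULL;
    aligned = (char *)raw + alignment + sizeof(void *);
    aligned -= ((uintptr_t)aligned & (alignment - 1)); /* round down to alignment */
    ((void **)aligned)[-1] = raw;                      /* stash the raw pointer */
    return aligned;
}

static void aligned_free_fallback(void *ptr)
{
    free(((void **)ptr)[-1]);                          /* recover the raw pointer */
}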
9694 atomic_sub_nounderflow(size_t *var, size_t sub)
9696 if (sub == 0) return;
9700 if (val < sub) sub = val;
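The surrounding loop body is mostly elided above; the visible clamp `if (val < sub) sub = val;` is the key step. A hedged reconstruction with C11 atomics (CRuby uses its own ATOMIC_SIZE_* macros, so this is a sketch of the idea, not the original code):

#include <stdatomic.h>
#include <stddef.h>

static void
atomic_sub_nounderflow_sketch(_Atomic size_t *var, size_t sub)
{
    if (sub == 0) return;
    for (;;) {
        size_t val = atomic_load(var);
        if (val < sub) sub = val;  /* clamp instead of wrapping below zero */
        if (atomic_compare_exchange_weak(var, &val, val - sub)) break;
    }
}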
9715 garbage_collect_with_gvl(objspace, reason);
9722 if (new_size > old_size) {
9724 #if RGENGC_ESTIMATE_OLDMALLOC
9730 #if RGENGC_ESTIMATE_OLDMALLOC
9746 #if MALLOC_ALLOCATED_SIZE
9747 if (new_size >= old_size) {
9751 size_t dec_size = old_size - new_size;
9752 size_t allocated_size = objspace->malloc_params.allocated_size;
9754 #if MALLOC_ALLOCATED_SIZE_CHECK
9755 if (allocated_size < dec_size) {
9756 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
9759 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
9762 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
9767     (int)new_size, (int)old_size);
9776 if (allocations > 0) {
9777 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
9779 #if MALLOC_ALLOCATED_SIZE_CHECK
9793 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
9800 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
9801 const char *ruby_malloc_info_file;
9802 int ruby_malloc_info_line;
9805 static inline size_t
9810 #if CALC_EXACT_MALLOC_SIZE
9817 static inline void *
9820 size = objspace_malloc_size(objspace, mem, size);
9823 #if CALC_EXACT_MALLOC_SIZE
9827 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
9829 info->file = ruby_malloc_info_file;
9830 info->line = info->file ? ruby_malloc_info_line : 0;
9841 #define TRY_WITH_GC(alloc) do { \
9842 objspace_malloc_gc_stress(objspace); \
9844 (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
9845 GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
9846 GPR_FLAG_MALLOC) || \
9860 size = objspace_malloc_prepare(objspace, size);
9863 return objspace_malloc_fixup(objspace, mem, size);
9866 static inline size_t
9867 xmalloc2_size(const size_t count, const size_t elsize)
9873 objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
9877 if (!ptr) return objspace_xmalloc0(objspace, new_size);
9884 if (new_size == 0) {
9885 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
9908 objspace_xfree(objspace, ptr, old_size);
9922 #if CALC_EXACT_MALLOC_SIZE
9927 old_size = info->size;
9931 old_size = objspace_malloc_size(objspace, ptr, old_size);
9933 new_size = objspace_malloc_size(objspace, mem, new_size);
9935 #if CALC_EXACT_MALLOC_SIZE
9938 info->size = new_size;
9949 #if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
9951 #define MALLOC_INFO_GEN_SIZE 100
9952 #define MALLOC_INFO_SIZE_SIZE 10
9953 static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
9954 static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
9955 static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
9956 static st_table *malloc_info_file_table;
9961 const char *file = (void *)key;
9962 const size_t *data = (void *)val;
9964 fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);
9976 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
9977 if (i == MALLOC_INFO_GEN_SIZE-1) {
9978 fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9981 fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9986 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
9992 if (malloc_info_file_table) {
9994 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
10014 #if CALC_EXACT_MALLOC_SIZE
10017 old_size = info->size;
10019 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10022 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
10025 malloc_info_gen_cnt[gen_index]++;
10026 malloc_info_gen_size[gen_index] += info->size;
10028 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
10029 size_t s = 16 << i;
10030 if (info->size <= s) {
10031 malloc_info_size[i]++;
10035 malloc_info_size[i]++;
10042 if (malloc_info_file_table == NULL) {
10049 data = malloc(xmalloc2_size(2, sizeof(size_t)));
10050 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
10051 data[0] = data[1] = 0;
10055 data[1] += info->size;
10060 fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n", (int)info->size, gen, info->file, (int)info->line);
10070 old_size = objspace_malloc_size(objspace, ptr, old_size);
10079 ruby_xmalloc0(size_t size)
10088 negative_size_allocation_error("too large allocation size");
10090 return ruby_xmalloc0(size);
10112 size = objspace_malloc_prepare(objspace, size);
10114 return objspace_malloc_fixup(objspace, mem, size);
10123 #ifdef ruby_sized_xrealloc
10124 #undef ruby_sized_xrealloc
10130 negative_size_allocation_error("too large allocation size");
10133 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
10142 #ifdef ruby_sized_xrealloc2
10143 #undef ruby_sized_xrealloc2
10148 size_t len = xmalloc2_size(n, size);
10158 #ifdef ruby_sized_xfree
10159 #undef ruby_sized_xfree
10178 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10185 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10192 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10199 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10210 #if CALC_EXACT_MALLOC_SIZE
10214 #if CALC_EXACT_MALLOC_SIZE
10223 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10239 #if CALC_EXACT_MALLOC_SIZE
10255 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
10287 #if MALLOC_ALLOCATED_SIZE
10298 gc_malloc_allocated_size(VALUE self)
10313 gc_malloc_allocations(VALUE self)
10326 else if (diff < 0) {
10341 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
10343 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10355 wmap_compact(void *ptr)
10364 wmap_mark(void *ptr)
10367 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10382 wmap_free(void *ptr)
10399 wmap_memsize(const void *ptr)
10439 if (!is_id_value(objspace, obj)) return FALSE;
10440 if (!is_live_object(objspace, obj)) return FALSE;
10448 if (!existing) return ST_STOP;
10451 if (ptr[i] != wmap) {
10478 rb_bug("wmap_finalize: objid is not found.");
10484 rids = (VALUE *)data;
10529 wmap_inspect(VALUE self)
10550 if (wmap_live_p(objspace, obj)) {
10558 wmap_each(VALUE self)
10573 if (wmap_live_p(objspace, obj)) {
10581 wmap_each_key(VALUE self)
10596 if (wmap_live_p(objspace, obj)) {
10604 wmap_each_value(VALUE self)
10629 wmap_keys(VALUE self)
10656 wmap_values(VALUE self)
10680 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
10697 define_final0(orig, w->final);
10700 define_final0(wmap, w->final);
10720 if (!wmap_live_p(objspace, obj)) return Qundef;
10741 wmap_size(VALUE self)
10748 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
10759 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
10763 getrusage_time(void)
10765 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
10767 static int try_clock_gettime = 1;
10773 try_clock_gettime = 0;
10780 struct rusage usage;
10782 if (getrusage(RUSAGE_SELF, &usage) == 0) {
10783 time = usage.ru_utime;
10784 return time.tv_sec + time.tv_usec * 1e-6;
10791 FILETIME creation_time, exit_time, kernel_time, user_time;
10796 if (GetProcessTimes(GetCurrentProcess(),
10797     &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
10798 memcpy(&ui, &user_time, sizeof(FILETIME));
10799 q = ui.QuadPart / 10L;
10800 t = (DWORD)(q % 1000000L) * 1e-6;
10806 t += (DWORD)q & ~(~0 << 16);
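Both branches above compute process CPU time in seconds as a double. A self-contained POSIX sketch of the same cascade (clock_gettime first, getrusage as the fallback; the Windows branch is omitted):

#include <time.h>
#include <sys/resource.h>

static double process_cpu_time(void)
{
    struct timespec ts;
    struct rusage usage;
#if defined(CLOCK_PROCESS_CPUTIME_ID)
    if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0)
        return ts.tv_sec + ts.tv_nsec * 1e-9;   /* high-resolution path */
#endif
    if (getrusage(RUSAGE_SELF, &usage) == 0)    /* portable fallback */
        return usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
    return 0.0;
}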
10817 gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
10838 rb_bug("gc_profile malloc or realloc miss");
10845 #if MALLOC_ALLOCATED_SIZE
10848 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
10851 struct rusage usage;
10852 if (getrusage(RUSAGE_SELF, &usage) == 0) {
10853 record->maxrss = usage.ru_maxrss;
10854 record->minflt = usage.ru_minflt;
10855 record->majflt = usage.ru_majflt;
10868 #if GC_PROFILE_MORE_DETAIL
10869 record->prepare_time = objspace->profile.prepare_time;
10877 elapsed_time_from(double time)
10879 double now = getrusage_time();
10898 #define RUBY_DTRACE_GC_HOOK(name) \
10899 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
10904 #if GC_PROFILE_MORE_DETAIL
10915 #if GC_PROFILE_MORE_DETAIL
10918 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
10948 record->gc_time += sweep_time;
10954 #if GC_PROFILE_MORE_DETAIL
10955 record->gc_sweep_time += sweep_time;
10965 #if GC_PROFILE_MORE_DETAIL
10982 #if GC_PROFILE_MORE_DETAIL
10984 record->heap_live_objects = live;
10985 record->heap_free_objects = total - live;
11003 gc_profile_clear(VALUE _)
11068 gc_profile_record_get(VALUE _)
11090 #if GC_PROFILE_MORE_DETAIL
11105 #if RGENGC_PROFILE > 0
11116 #if GC_PROFILE_MORE_DETAIL
11117 #define MAJOR_REASON_MAX 0x10
11120 gc_profile_dump_major_reason(int flags, char *buff)
11131 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
11132 buff[i++] = #x[0]; \
11133 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
11139 #if RGENGC_ESTIMATE_OLDMALLOC
11153 #ifdef MAJOR_REASON_MAX
11154 char reason_str[MAJOR_REASON_MAX];
11162 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
11171 #if GC_PROFILE_MORE_DETAIL
11174 "Prepare Time = Previously GC's rest sweep time\n"
11175 "Index Flags Allocate Inc. Allocate Limit"
11179 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
11181 " OldgenObj RemNormObj RemShadObj"
11184 " MaxRSS(KB) MinorFLT MajorFLT"
11204 gc_profile_dump_major_reason(record->flags, reason_str),
11211 record->allocate_increase, record->allocate_limit,
11213 record->allocated_size,
11215 record->heap_use_pages,
11216 record->gc_mark_time*1000,
11217 record->gc_sweep_time*1000,
11218 record->prepare_time*1000,
11220 record->heap_live_objects,
11221 record->heap_free_objects,
11222 record->removing_objects,
11223 record->empty_objects
11226 record->old_objects,
11227 record->remembered_normal_objects,
11228 record->remembered_shady_objects
11232 record->maxrss / 1024,
11255 gc_profile_result(VALUE _)
11290 gc_profile_total_time(VALUE self)
11314 gc_profile_enable_get(VALUE self)
11329 gc_profile_enable(VALUE _)
11346 gc_profile_disable(VALUE _)
11359 static const char *
11363 #define TYPE_NAME(t) case (t): return #t;
11399 static const char *
11422 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
11426 # define ARY_SHARED_P(ary) \
11427 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
11428 FL_TEST((ary),ELTS_SHARED)!=0)
11429 # define ARY_EMBED_P(ary) \
11430 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
11431 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
11434 rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
11439 snprintf(buff, buff_size, " %s@%s:%d",
11451 #define BUFF_ARGS buff + pos, buff_size - pos
11452 #define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
11464 #define TF(c) ((c) != 0 ? "true" : "false")
11465 #define C(c, s) ((c) != 0 ? (s) : " ")
11468 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
11478 obj_type_name(obj)));
11484 obj_type_name(obj)));
11490 obj_type_name(obj)));
11493 if (internal_object_p(obj)) {
11502 if (!NIL_P(class_path)) {
11556 if (!NIL_P(class_path)) {
11564 if (!NIL_P(class_path)) {
11586 (block = vm_proc_block(obj)) != NULL &&
11588 (iseq = vm_block_iseq(block)) != NULL) {
11600 const char *imemo_name = "\0";
11602 #define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;
11623 APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
11656 #if RGENGC_OBJ_INFO
11657 #define OBJ_INFO_BUFFERS_NUM 10
11658 #define OBJ_INFO_BUFFERS_SIZE 0x100
11659 static int obj_info_buffers_index = 0;
11660 static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
11662 static const char *
11665 const int index = obj_info_buffers_index++;
11666 char * const buff = &obj_info_buffers[index][0];
11668 if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
11669 obj_info_buffers_index = 0;
11675 static const char *
11678 return obj_type_name(obj);
11685 return obj_info(obj);
11717 if (is_pointer_to_heap(objspace, (void *)obj)) {
11730 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
11731 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
11736 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
11746 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
11758 #if GC_DEBUG_STRESS_TO_CLASS
11857 #include "gc.rbinc"
11863 VALUE rb_mObjSpace;
11864 VALUE rb_mProfiler;
11865 VALUE gc_constants;
11929 #if MALLOC_ALLOCATED_SIZE
11934 #if GC_DEBUG_STRESS_TO_CLASS
11943 #define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
11961 #ifdef ruby_xmalloc
11962 #undef ruby_xmalloc
11964 #ifdef ruby_xmalloc2
11965 #undef ruby_xmalloc2
11967 #ifdef ruby_xcalloc
11968 #undef ruby_xcalloc
11970 #ifdef ruby_xrealloc
11971 #undef ruby_xrealloc
11973 #ifdef ruby_xrealloc2
11974 #undef ruby_xrealloc2
11980 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11981 ruby_malloc_info_file = __FILE__;
11982 ruby_malloc_info_line = __LINE__;
11990 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11991 ruby_malloc_info_file = __FILE__;
11992 ruby_malloc_info_line = __LINE__;
12000 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
12001 ruby_malloc_info_file = __FILE__;
12002 ruby_malloc_info_line = __LINE__;
12010 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
12011 ruby_malloc_info_file = __FILE__;
12012 ruby_malloc_info_line = __LINE__;
12020 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
12021 ruby_malloc_info_file = __FILE__;
12022 ruby_malloc_info_line = __LINE__;