Ruby 1.9.3p537 (2014-02-19, revision 0)
gc.c
Go to the documentation of this file.
00001 /**********************************************************************
00002 
00003   gc.c -
00004 
00005   $Author$
00006   created at: Tue Oct  5 09:44:46 JST 1993
00007 
00008   Copyright (C) 1993-2007 Yukihiro Matsumoto
00009   Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
00010   Copyright (C) 2000  Information-technology Promotion Agency, Japan
00011 
00012 **********************************************************************/
00013 
00014 #include "ruby/ruby.h"
00015 #include "ruby/st.h"
00016 #include "ruby/re.h"
00017 #include "ruby/io.h"
00018 #include "ruby/util.h"
00019 #include "eval_intern.h"
00020 #include "vm_core.h"
00021 #include "internal.h"
00022 #include "gc.h"
00023 #include "constant.h"
00024 #include "ruby_atomic.h"
00025 #include <stdio.h>
00026 #include <setjmp.h>
00027 #include <sys/types.h>
00028 #include <assert.h>
00029 
00030 #ifdef HAVE_SYS_TIME_H
00031 #include <sys/time.h>
00032 #endif
00033 
00034 #ifdef HAVE_SYS_RESOURCE_H
00035 #include <sys/resource.h>
00036 #endif
00037 
00038 #if defined _WIN32 || defined __CYGWIN__
00039 #include <windows.h>
00040 #endif
00041 
00042 #ifdef HAVE_VALGRIND_MEMCHECK_H
00043 # include <valgrind/memcheck.h>
00044 # ifndef VALGRIND_MAKE_MEM_DEFINED
00045 #  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
00046 # endif
00047 # ifndef VALGRIND_MAKE_MEM_UNDEFINED
00048 #  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
00049 # endif
00050 #else
00051 # define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
00052 # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
00053 #endif
00054 
00055 #define rb_setjmp(env) RUBY_SETJMP(env)
00056 #define rb_jmp_buf rb_jmpbuf_t
00057 
00058 /* Make alloca work the best possible way.  */
00059 #ifdef __GNUC__
00060 # ifndef atarist
00061 #  ifndef alloca
00062 #   define alloca __builtin_alloca
00063 #  endif
00064 # endif /* atarist */
00065 #else
00066 # ifdef HAVE_ALLOCA_H
00067 #  include <alloca.h>
00068 # else
00069 #  ifdef _AIX
00070  #pragma alloca
00071 #  else
00072 #   ifndef alloca /* predefined by HP cc +Olibcalls */
00073 void *alloca ();
00074 #   endif
00075 #  endif /* AIX */
00076 # endif /* HAVE_ALLOCA_H */
00077 #endif /* __GNUC__ */
00078 
00079 #ifndef GC_MALLOC_LIMIT
00080 #define GC_MALLOC_LIMIT 8000000
00081 #endif
00082 #define HEAP_MIN_SLOTS 10000
00083 #define FREE_MIN  4096
00084 
00085 typedef struct {
00086     unsigned int initial_malloc_limit;
00087     unsigned int initial_heap_min_slots;
00088     unsigned int initial_free_min;
00089     int gc_stress;
00090 } ruby_gc_params_t;
00091 
00092 ruby_gc_params_t initial_params = {
00093     GC_MALLOC_LIMIT,
00094     HEAP_MIN_SLOTS,
00095     FREE_MIN,
00096 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00097     FALSE,
00098 #endif
00099 };
00100 
00101 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
00102 
00103 #if SIZEOF_LONG == SIZEOF_VOIDP
00104 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
00105 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
00106 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
00107 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
00108 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
00109    ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
00110 #else
00111 # error not supported
00112 #endif
00113 
00114 int ruby_gc_debug_indent = 0;
00115 
/* for GC profile */
#define GC_PROFILE_MORE_DETAIL 0
/*
 * One row of GC::Profiler data, filled per collection by the GC_PROF_*
 * macros below.  The *_time members are CPU seconds as returned by
 * getrusage_time().
 */
typedef struct gc_profile_record {
    double gc_time;             /* total duration of this GC run */
    double gc_mark_time;        /* mark phase only (MORE_DETAIL builds) */
    double gc_sweep_time;       /* sweep phase only (MORE_DETAIL builds) */
    double gc_invoke_time;      /* offset from profile.invoke_time to this GC */

    size_t heap_use_slots;      /* heap pages in use */
    size_t heap_live_objects;
    size_t heap_free_objects;
    size_t heap_total_objects;
    size_t heap_use_size;       /* live objects in bytes */
    size_t heap_total_size;     /* all slots in bytes */

    int have_finalize;          /* deferred finalizers pending? */
    int is_marked;              /* did this run include a mark phase? */

    size_t allocate_increase;   /* malloc_increase sampled at GC time */
    size_t allocate_limit;      /* malloc_limit sampled at GC time */
} gc_profile_record;
00137 
/*
 * Returns the accumulated user-mode CPU time of the current process in
 * seconds.  Uses getrusage() where available, GetProcessTimes() on
 * Windows, and falls back to 0.0 everywhere else.
 */
static double
getrusage_time(void)
{
#ifdef RUSAGE_SELF
    struct rusage ru;
    struct timeval user;

    getrusage(RUSAGE_SELF, &ru);
    user = ru.ru_utime;
    return (double)user.tv_sec + (double)user.tv_usec * 1e-6;
#elif defined _WIN32
    FILETIME creation_time, exit_time, kernel_time, user_time;
    ULARGE_INTEGER ui;
    LONG_LONG q;
    double t;

    if (GetProcessTimes(GetCurrentProcess(),
                        &creation_time, &exit_time, &kernel_time, &user_time) == 0)
    {
        return 0.0;
    }
    memcpy(&ui, &user_time, sizeof(FILETIME));
    /* FILETIME counts 100ns ticks; convert to microseconds first */
    q = ui.QuadPart / 10L;
    t = (DWORD)(q % 1000000L) * 1e-6;
    q /= 1000000L;
#ifdef __GNUC__
    t += q;
#else
    /* avoid a 64->double conversion some compilers lack */
    t += (double)(DWORD)(q >> 16) * (1 << 16);
    t += (DWORD)q & ~(~0 << 16);
#endif
    return t;
#else
    return 0.0;
#endif
}
00173 
00174 #define GC_PROF_TIMER_START do {\
00175         if (objspace->profile.run) {\
00176             if (!objspace->profile.record) {\
00177                 objspace->profile.size = 1000;\
00178                 objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
00179             }\
00180             if (count >= objspace->profile.size) {\
00181                 objspace->profile.size += 1000;\
00182                 objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
00183             }\
00184             if (!objspace->profile.record) {\
00185                 rb_bug("gc_profile malloc or realloc miss");\
00186             }\
00187             MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
00188             gc_time = getrusage_time();\
00189             objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
00190         }\
00191     } while(0)
00192 
00193 #define GC_PROF_TIMER_STOP(marked) do {\
00194         if (objspace->profile.run) {\
00195             gc_time = getrusage_time() - gc_time;\
00196             if (gc_time < 0) gc_time = 0;\
00197             objspace->profile.record[count].gc_time = gc_time;\
00198             objspace->profile.record[count].is_marked = !!(marked);\
00199             GC_PROF_SET_HEAP_INFO(objspace->profile.record[count]);\
00200             objspace->profile.count++;\
00201         }\
00202     } while(0)
00203 
00204 #if GC_PROFILE_MORE_DETAIL
00205 #define INIT_GC_PROF_PARAMS double gc_time = 0, sweep_time = 0;\
00206     size_t count = objspace->profile.count, total = 0, live = 0
00207 
00208 #define GC_PROF_MARK_TIMER_START double mark_time = 0;\
00209     do {\
00210         if (objspace->profile.run) {\
00211             mark_time = getrusage_time();\
00212         }\
00213     } while(0)
00214 
00215 #define GC_PROF_MARK_TIMER_STOP do {\
00216         if (objspace->profile.run) {\
00217             mark_time = getrusage_time() - mark_time;\
00218             if (mark_time < 0) mark_time = 0;\
00219             objspace->profile.record[objspace->profile.count].gc_mark_time = mark_time;\
00220         }\
00221     } while(0)
00222 
00223 #define GC_PROF_SWEEP_TIMER_START do {\
00224         if (objspace->profile.run) {\
00225             sweep_time = getrusage_time();\
00226         }\
00227     } while(0)
00228 
00229 #define GC_PROF_SWEEP_TIMER_STOP do {\
00230         if (objspace->profile.run) {\
00231             sweep_time = getrusage_time() - sweep_time;\
00232             if (sweep_time < 0) sweep_time = 0;\
00233             objspace->profile.record[count].gc_sweep_time = sweep_time;\
00234         }\
00235     } while(0)
00236 #define GC_PROF_SET_MALLOC_INFO do {\
00237         if (objspace->profile.run) {\
00238             gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
00239             record->allocate_increase = malloc_increase;\
00240             record->allocate_limit = malloc_limit; \
00241         }\
00242     } while(0)
00243 #define GC_PROF_SET_HEAP_INFO(record) do {\
00244         live = objspace->heap.live_num;\
00245         total = heaps_used * HEAP_OBJ_LIMIT;\
00246         (record).heap_use_slots = heaps_used;\
00247         (record).heap_live_objects = live;\
00248         (record).heap_free_objects = total - live;\
00249         (record).heap_total_objects = total;\
00250         (record).have_finalize = deferred_final_list ? Qtrue : Qfalse;\
00251         (record).heap_use_size = live * sizeof(RVALUE);\
00252         (record).heap_total_size = total * sizeof(RVALUE);\
00253     } while(0)
00254 #define GC_PROF_INC_LIVE_NUM objspace->heap.live_num++
00255 #define GC_PROF_DEC_LIVE_NUM objspace->heap.live_num--
00256 #else
00257 #define INIT_GC_PROF_PARAMS double gc_time = 0;\
00258     size_t count = objspace->profile.count, total = 0, live = 0
00259 #define GC_PROF_MARK_TIMER_START
00260 #define GC_PROF_MARK_TIMER_STOP
00261 #define GC_PROF_SWEEP_TIMER_START
00262 #define GC_PROF_SWEEP_TIMER_STOP
00263 #define GC_PROF_SET_MALLOC_INFO
00264 #define GC_PROF_SET_HEAP_INFO(record) do {\
00265         live = objspace->heap.live_num;\
00266         total = heaps_used * HEAP_OBJ_LIMIT;\
00267         (record).heap_total_objects = total;\
00268         (record).heap_use_size = live * sizeof(RVALUE);\
00269         (record).heap_total_size = total * sizeof(RVALUE);\
00270     } while(0)
00271 #define GC_PROF_INC_LIVE_NUM
00272 #define GC_PROF_DEC_LIVE_NUM
00273 #endif
00274 
00275 
00276 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
00277 #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
00278 #endif
00279 
/*
 * A single GC heap cell.  Every Ruby object occupies exactly one
 * RVALUE; the union overlays all built-in object representations so
 * every slot has the same fixed size.  A freed slot reuses its memory
 * as a freelist link (as.free).
 */
typedef struct RVALUE {
    union {
        struct {
            VALUE flags;                /* always 0 for freed obj */
            struct RVALUE *next;        /* next cell on the freelist */
        } free;
        struct RBasic  basic;
        struct RObject object;
        struct RClass  klass;
        struct RFloat  flonum;
        struct RString string;
        struct RArray  array;
        struct RRegexp regexp;
        struct RHash   hash;
        struct RData   data;
        struct RTypedData   typeddata;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RFile   file;
        struct RNode   node;
        struct RMatch  match;
        struct RRational rational;
        struct RComplex complex;
    } as;
#ifdef GC_DEBUG
    const char *file;   /* allocation site, recorded in debug builds */
    int   line;
#endif
} RVALUE;
00309 
00310 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
00311 #pragma pack(pop)
00312 #endif
00313 
/* One heap page: a malloc()ed chunk holding up to `limit` RVALUEs,
 * linked into a doubly linked list of all pages. */
struct heaps_slot {
    void *membase;              /* raw malloc() base (before alignment) */
    RVALUE *slot;               /* first aligned RVALUE in the page */
    size_t limit;               /* number of usable object cells */
    struct heaps_slot *next;
    struct heaps_slot *prev;
};

/* Entry of the page index kept sorted by membase (see
 * assign_heap_slot's binary search), used for pointer range checks. */
struct sorted_heaps_slot {
    RVALUE *start;              /* first object slot in the page */
    RVALUE *end;                /* one past the last slot */
    struct heaps_slot *slot;
};

/* Node registering the C global at *varptr as a GC root
 * (see rb_gc_register_address). */
struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};

#define STACK_CHUNK_SIZE 500

/* Fixed-size chunk of the mark stack; chunks form a singly linked
 * list so the stack can grow without realloc. */
typedef struct stack_chunk {
    VALUE data[STACK_CHUNK_SIZE];
    struct stack_chunk *next;
} stack_chunk_t;

/* Chunked stack of VALUEs pending marking, with a cache of spare
 * chunks to avoid malloc churn during GC. */
typedef struct mark_stack {
    stack_chunk_t *chunk;       /* current (top) chunk */
    stack_chunk_t *cache;       /* recycled chunks */
    size_t index;               /* next free slot within `chunk` */
    size_t limit;
    size_t cache_size;
    size_t unused_cache_size;
} mark_stack_t;
00348 
00349 #define CALC_EXACT_MALLOC_SIZE 0
00350 
/*
 * All global state of one garbage collector instance.  Normally a
 * single static instance (rb_objspace, below); with ENABLE_VM_OBJSPACE
 * each VM owns one.  The accessor macros that follow (malloc_limit,
 * heaps, ...) assume a local variable named `objspace`.
 */
typedef struct rb_objspace {
    struct {
        size_t limit;           /* malloc'd-bytes budget that triggers GC */
        size_t increase;        /* bytes malloc'd since the last GC */
#if CALC_EXACT_MALLOC_SIZE
        size_t allocated_size;  /* exact outstanding allocation total */
        size_t allocations;     /* count of live allocations */
#endif
    } malloc_params;
    struct {
        size_t increment;
        struct heaps_slot *ptr;             /* head of the page list */
        struct heaps_slot *sweep_slots;     /* pages awaiting lazy sweep */
        struct sorted_heaps_slot *sorted;   /* address-sorted page index */
        size_t length;          /* capacity of `sorted` */
        size_t used;            /* pages currently allocated */
        RVALUE *freelist;       /* chain of free object cells */
        RVALUE *range[2];       /* [lo, hi) bounds of all heap memory */
        RVALUE *freed;
        size_t live_num;
        size_t free_num;
        size_t free_min;        /* NOTE(review): presumably the minimum
                                 * free cells wanted after a sweep
                                 * (cf. initial_free_min) — confirm in
                                 * gc_sweep */
        size_t final_num;
        size_t do_heap_free;
    } heap;
    struct {
        int dont_gc;            /* GC.disable flag */
        int dont_lazy_sweep;
        int during_gc;
        rb_atomic_t finalizing;
    } flags;
    struct {
        st_table *table;        /* object -> finalizer registration */
        RVALUE *deferred;       /* objects pending deferred finalization */
    } final;
    mark_stack_t mark_stack;
    struct {
        int run;                /* GC::Profiler enabled? */
        gc_profile_record *record;  /* lazily grown record array */
        size_t count;           /* records filled so far */
        size_t size;            /* capacity of `record` */
        double invoke_time;
    } profile;
    struct gc_list *global_list; /* registered C global roots */
    size_t count;
    int gc_stress;              /* GC.stress flag */
} rb_objspace_t;
00398 
00399 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00400 #define rb_objspace (*GET_VM()->objspace)
00401 #define ruby_initial_gc_stress  initial_params.gc_stress
00402 int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
00403 #else
00404 static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
00405 int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
00406 #endif
00407 #define malloc_limit            objspace->malloc_params.limit
00408 #define malloc_increase         objspace->malloc_params.increase
00409 #define heaps                   objspace->heap.ptr
00410 #define heaps_length            objspace->heap.length
00411 #define heaps_used              objspace->heap.used
00412 #define freelist                objspace->heap.freelist
00413 #define lomem                   objspace->heap.range[0]
00414 #define himem                   objspace->heap.range[1]
00415 #define heaps_inc               objspace->heap.increment
00416 #define heaps_freed             objspace->heap.freed
00417 #define dont_gc                 objspace->flags.dont_gc
00418 #define during_gc               objspace->flags.during_gc
00419 #define finalizing              objspace->flags.finalizing
00420 #define finalizer_table         objspace->final.table
00421 #define deferred_final_list     objspace->final.deferred
00422 #define global_List             objspace->global_list
00423 #define ruby_gc_stress          objspace->gc_stress
00424 #define initial_malloc_limit    initial_params.initial_malloc_limit
00425 #define initial_heap_min_slots  initial_params.initial_heap_min_slots
00426 #define initial_free_min        initial_params.initial_free_min
00427 
00428 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
00429 
00430 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00431 rb_objspace_t *
00432 rb_objspace_alloc(void)
00433 {
00434     rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
00435     memset(objspace, 0, sizeof(*objspace));
00436     malloc_limit = initial_malloc_limit;
00437     ruby_gc_stress = ruby_initial_gc_stress;
00438 
00439     return objspace;
00440 }
00441 #endif
00442 
00443 static void initial_expand_heap(rb_objspace_t *objspace);
00444 static void init_mark_stack(mark_stack_t *stack);
00445 
00446 void
00447 rb_gc_set_params(void)
00448 {
00449     char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;
00450 
00451     if (rb_safe_level() > 0) return;
00452 
00453     malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
00454     if (malloc_limit_ptr != NULL) {
00455         int malloc_limit_i = atoi(malloc_limit_ptr);
00456         if (RTEST(ruby_verbose))
00457             fprintf(stderr, "malloc_limit=%d (%d)\n",
00458                     malloc_limit_i, initial_malloc_limit);
00459         if (malloc_limit_i > 0) {
00460             initial_malloc_limit = malloc_limit_i;
00461         }
00462     }
00463 
00464     heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
00465     if (heap_min_slots_ptr != NULL) {
00466         int heap_min_slots_i = atoi(heap_min_slots_ptr);
00467         if (RTEST(ruby_verbose))
00468             fprintf(stderr, "heap_min_slots=%d (%d)\n",
00469                     heap_min_slots_i, initial_heap_min_slots);
00470         if (heap_min_slots_i > 0) {
00471             initial_heap_min_slots = heap_min_slots_i;
00472             initial_expand_heap(&rb_objspace);
00473         }
00474     }
00475 
00476     free_min_ptr = getenv("RUBY_FREE_MIN");
00477     if (free_min_ptr != NULL) {
00478         int free_min_i = atoi(free_min_ptr);
00479         if (RTEST(ruby_verbose))
00480             fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
00481         if (free_min_i > 0) {
00482             initial_free_min = free_min_i;
00483         }
00484     }
00485 }
00486 
00487 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00488 static void gc_sweep(rb_objspace_t *);
00489 static void slot_sweep(rb_objspace_t *, struct heaps_slot *);
00490 static void rest_sweep(rb_objspace_t *);
00491 static void free_stack_chunks(mark_stack_t *);
00492 
/*
 * Tears down an objspace built by rb_objspace_alloc(): profiler
 * records, the registered-global-roots list, every heap page, the
 * mark stack, and finally the objspace itself.
 */
void
rb_objspace_free(rb_objspace_t *objspace)
{
    /* finish any pending lazy sweep so page bookkeeping is consistent
     * before the pages are released */
    rest_sweep(objspace);
    if (objspace->profile.record) {
        free(objspace->profile.record);
        objspace->profile.record = 0;
    }
    if (global_List) {
        struct gc_list *list, *next;
        for (list = global_List; list; list = next) {
            next = list->next;
            free(list);
        }
    }
    if (objspace->heap.sorted) {
        size_t i;
        /* each index entry owns both the raw page memory and its
         * heaps_slot descriptor */
        for (i = 0; i < heaps_used; ++i) {
            free(objspace->heap.sorted[i].slot->membase);
            free(objspace->heap.sorted[i].slot);
        }
        free(objspace->heap.sorted);
        heaps_used = 0;
        heaps = 0;
    }
    free_stack_chunks(&objspace->mark_stack);
    free(objspace);
}
00521 #endif
00522 
00523 /* tiny heap size */
00524 /* 32KB */
00525 /*#define HEAP_SIZE 0x8000 */
00526 /* 128KB */
00527 /*#define HEAP_SIZE 0x20000 */
00528 /* 64KB */
00529 /*#define HEAP_SIZE 0x10000 */
00530 /* 16KB */
00531 #define HEAP_SIZE 0x4000
00532 /* 8KB */
00533 /*#define HEAP_SIZE 0x2000 */
00534 /* 4KB */
00535 /*#define HEAP_SIZE 0x1000 */
00536 /* 2KB */
00537 /*#define HEAP_SIZE 0x800 */
00538 
00539 #define HEAP_OBJ_LIMIT (unsigned int)(HEAP_SIZE / sizeof(struct RVALUE))
00540 
00541 extern st_table *rb_class_tbl;
00542 
00543 int ruby_disable_gc_stress = 0;
00544 
00545 static void run_final(rb_objspace_t *objspace, VALUE obj);
00546 static int garbage_collect(rb_objspace_t *objspace);
00547 static int gc_lazy_sweep(rb_objspace_t *objspace);
00548 
00549 void
00550 rb_global_variable(VALUE *var)
00551 {
00552     rb_gc_register_address(var);
00553 }
00554 
/* Trampoline for rb_thread_call_with_gvl(): raises NoMemoryError once
 * the GVL has been acquired.  The return value is never used. */
static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}
00561 
00562 static void
00563 ruby_memerror(void)
00564 {
00565     if (ruby_thread_has_gvl_p()) {
00566         rb_memerror();
00567     }
00568     else {
00569         if (ruby_native_thread_p()) {
00570             rb_thread_call_with_gvl(ruby_memerror_body, 0);
00571         }
00572         else {
00573             /* no ruby thread */
00574             fprintf(stderr, "[FATAL] failed to allocate memory\n");
00575             exit(EXIT_FAILURE);
00576         }
00577     }
00578 }
00579 
/*
 * Raises NoMemoryError on the current thread (GVL must be held; see
 * ruby_memerror() for the GVL-less wrapper).  Aborts outright when the
 * prebuilt NoMemoryError instance is missing, or when allocation fails
 * recursively at $SAFE < 4; a recursive failure at $SAFE >= 4 jumps
 * straight to the raise tag instead of allocating anything new.
 */
void
rb_memerror(void)
{
    rb_thread_t *th = GET_THREAD();
    if (!nomem_error ||
        (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
        /* second failure: re-raise without allocating a backtrace */
        rb_thread_raised_clear(th);
        GET_THREAD()->errinfo = nomem_error;
        JUMP_TAG(TAG_RAISE);
    }
    rb_thread_raised_set(th, RAISED_NOMEMORY);
    rb_exc_raise(nomem_error);
}
00597 
00598 /*
00599  *  call-seq:
00600  *    GC.stress                 -> true or false
00601  *
00602  *  returns current status of GC stress mode.
00603  */
00604 
00605 static VALUE
00606 gc_stress_get(VALUE self)
00607 {
00608     rb_objspace_t *objspace = &rb_objspace;
00609     return ruby_gc_stress ? Qtrue : Qfalse;
00610 }
00611 
00612 /*
00613  *  call-seq:
00614  *    GC.stress = bool          -> bool
00615  *
00616  *  Updates the GC stress mode.
00617  *
00618  *  When stress mode is enabled the GC is invoked at every GC opportunity:
00619  *  all memory and object allocations.
00620  *
00621  *  Enabling stress mode makes Ruby very slow, it is only for debugging.
00622  */
00623 
00624 static VALUE
00625 gc_stress_set(VALUE self, VALUE flag)
00626 {
00627     rb_objspace_t *objspace = &rb_objspace;
00628     rb_secure(2);
00629     ruby_gc_stress = RTEST(flag);
00630     return flag;
00631 }
00632 
00633 /*
00634  *  call-seq:
00635  *    GC::Profiler.enable?                 -> true or false
00636  *
00637  *  The current status of GC profile mode.
00638  */
00639 
00640 static VALUE
00641 gc_profile_enable_get(VALUE self)
00642 {
00643     rb_objspace_t *objspace = &rb_objspace;
00644     return objspace->profile.run ? Qtrue : Qfalse;
00645 }
00646 
00647 /*
00648  *  call-seq:
00649  *    GC::Profiler.enable          -> nil
00650  *
00651  *  Starts the GC profiler.
00652  *
00653  */
00654 
00655 static VALUE
00656 gc_profile_enable(void)
00657 {
00658     rb_objspace_t *objspace = &rb_objspace;
00659 
00660     objspace->profile.run = TRUE;
00661     return Qnil;
00662 }
00663 
00664 /*
00665  *  call-seq:
00666  *    GC::Profiler.disable          -> nil
00667  *
00668  *  Stops the GC profiler.
00669  *
00670  */
00671 
00672 static VALUE
00673 gc_profile_disable(void)
00674 {
00675     rb_objspace_t *objspace = &rb_objspace;
00676 
00677     objspace->profile.run = FALSE;
00678     return Qnil;
00679 }
00680 
00681 /*
00682  *  call-seq:
00683  *    GC::Profiler.clear          -> nil
00684  *
00685  *  Clears the GC profiler data.
00686  *
00687  */
00688 
00689 static VALUE
00690 gc_profile_clear(void)
00691 {
00692     rb_objspace_t *objspace = &rb_objspace;
00693     MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
00694     objspace->profile.count = 0;
00695     return Qnil;
00696 }
00697 
00698 static void *
00699 negative_size_allocation_error_with_gvl(void *ptr)
00700 {
00701     rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
00702     return 0; /* should not be reached */
00703 }
00704 
00705 static void
00706 negative_size_allocation_error(const char *msg)
00707 {
00708     if (ruby_thread_has_gvl_p()) {
00709         rb_raise(rb_eNoMemError, "%s", msg);
00710     }
00711     else {
00712         if (ruby_native_thread_p()) {
00713             rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
00714         }
00715         else {
00716             fprintf(stderr, "[FATAL] %s\n", msg);
00717             exit(EXIT_FAILURE);
00718         }
00719     }
00720 }
00721 
00722 static void *
00723 gc_with_gvl(void *ptr)
00724 {
00725     return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
00726 }
00727 
00728 static int
00729 garbage_collect_with_gvl(rb_objspace_t *objspace)
00730 {
00731     if (dont_gc) return TRUE;
00732     if (ruby_thread_has_gvl_p()) {
00733         return garbage_collect(objspace);
00734     }
00735     else {
00736         if (ruby_native_thread_p()) {
00737             return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
00738         }
00739         else {
00740             /* no ruby thread */
00741             fprintf(stderr, "[FATAL] failed to allocate memory\n");
00742             exit(EXIT_FAILURE);
00743         }
00744     }
00745 }
00746 
00747 static void vm_xfree(rb_objspace_t *objspace, void *ptr);
00748 
/*
 * Common entry checks for every allocation path: rejects sizes with
 * the sign bit set (negative, or absurdly large), maps 0 to 1 byte,
 * and triggers a GC when stress mode is on or the malloc'd-bytes
 * budget (malloc_limit) would be exceeded.  Returns the (possibly
 * adjusted) size to actually allocate.
 */
static inline size_t
vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    /* room for the hidden size header written by vm_malloc_fixup() */
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
        (malloc_increase+size) > malloc_limit) {
        garbage_collect_with_gvl(objspace);
    }

    return size;
}
00768 
/*
 * Common exit path for every allocation: charges the new bytes to the
 * GC trigger budget and, in CALC_EXACT_MALLOC_SIZE builds, stores the
 * size in a hidden header just before the returned pointer.
 * NOTE(review): `mem` is not NULL-checked here — callers guarantee
 * success via TRY_WITH_GC.
 */
static inline void *
vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}
00783 
00784 #define TRY_WITH_GC(alloc) do { \
00785         if (!(alloc) && \
00786             (!garbage_collect_with_gvl(objspace) || \
00787              !(alloc))) { \
00788             ruby_memerror(); \
00789         } \
00790     } while (0)
00791 
/*
 * GC-aware malloc(): may run a GC beforehand (budget/stress via
 * vm_malloc_prepare), retries once after a GC if malloc() fails, and
 * raises NoMemoryError (via ruby_memerror in TRY_WITH_GC) if it still
 * cannot allocate.
 */
static void *
vm_xmalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = vm_malloc_prepare(objspace, size);
    TRY_WITH_GC(mem = malloc(size));
    return vm_malloc_fixup(objspace, mem, size);
}
00801 
/*
 * GC-aware realloc().  A NULL ptr degenerates to vm_xmalloc(); size 0
 * frees and returns NULL.  On failure a GC is attempted and the
 * realloc retried once before raising NoMemoryError; `ptr` stays valid
 * throughout because the original pointer is never overwritten.
 * NOTE(review): malloc_increase is charged the full new size (the old
 * size is unknown here), so reallocs over-count against the GC budget;
 * likewise the CALC_EXACT_MALLOC_SIZE branch subtracts the *new* size
 * from allocated_size rather than the old one — confirm intent.
 */
static void *
vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc(objspace, size);
    if (size == 0) {
        vm_xfree(objspace, ptr);
        return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
        garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    /* step back to the hidden size header written at allocation */
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;
#endif

    mem = realloc(ptr, size);
    if (!mem) {
        if (garbage_collect_with_gvl(objspace)) {
            mem = realloc(ptr, size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}
00843 
/*
 * GC-aware free().  In CALC_EXACT_MALLOC_SIZE builds the hidden size
 * header written by vm_malloc_fixup() is recovered first so the exact
 * accounting stays balanced.  Callers (ruby_xfree) filter out NULL.
 */
static void
vm_xfree(rb_objspace_t *objspace, void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    size_t size;
    ptr = ((size_t *)ptr) - 1;
    size = ((size_t*)ptr)[0];
    objspace->malloc_params.allocated_size -= size;
    objspace->malloc_params.allocations--;
#endif

    free(ptr);
}
00857 
00858 void *
00859 ruby_xmalloc(size_t size)
00860 {
00861     return vm_xmalloc(&rb_objspace, size);
00862 }
00863 
00864 static inline size_t
00865 xmalloc2_size(size_t n, size_t size)
00866 {
00867     size_t len = size * n;
00868     if (n != 0 && size != len / n) {
00869         rb_raise(rb_eArgError, "malloc: possible integer overflow");
00870     }
00871     return len;
00872 }
00873 
00874 void *
00875 ruby_xmalloc2(size_t n, size_t size)
00876 {
00877     return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
00878 }
00879 
00880 static void *
00881 vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
00882 {
00883     void *mem;
00884     size_t size;
00885 
00886     size = xmalloc2_size(count, elsize);
00887     size = vm_malloc_prepare(objspace, size);
00888 
00889     TRY_WITH_GC(mem = calloc(1, size));
00890     return vm_malloc_fixup(objspace, mem, size);
00891 }
00892 
00893 void *
00894 ruby_xcalloc(size_t n, size_t size)
00895 {
00896     return vm_xcalloc(&rb_objspace, n, size);
00897 }
00898 
00899 void *
00900 ruby_xrealloc(void *ptr, size_t size)
00901 {
00902     return vm_xrealloc(&rb_objspace, ptr, size);
00903 }
00904 
00905 void *
00906 ruby_xrealloc2(void *ptr, size_t n, size_t size)
00907 {
00908     size_t len = size * n;
00909     if (n != 0 && size != len / n) {
00910         rb_raise(rb_eArgError, "realloc: possible integer overflow");
00911     }
00912     return ruby_xrealloc(ptr, len);
00913 }
00914 
00915 void
00916 ruby_xfree(void *x)
00917 {
00918     if (x)
00919         vm_xfree(&rb_objspace, x);
00920 }
00921 
00922 
00923 /*
00924  *  call-seq:
00925  *     GC.enable    -> true or false
00926  *
00927  *  Enables garbage collection, returning <code>true</code> if garbage
00928  *  collection was previously disabled.
00929  *
00930  *     GC.disable   #=> false
00931  *     GC.enable    #=> true
00932  *     GC.enable    #=> false
00933  *
00934  */
00935 
00936 VALUE
00937 rb_gc_enable(void)
00938 {
00939     rb_objspace_t *objspace = &rb_objspace;
00940     int old = dont_gc;
00941 
00942     dont_gc = FALSE;
00943     return old ? Qtrue : Qfalse;
00944 }
00945 
00946 /*
00947  *  call-seq:
00948  *     GC.disable    -> true or false
00949  *
00950  *  Disables garbage collection, returning <code>true</code> if garbage
00951  *  collection was already disabled.
00952  *
00953  *     GC.disable   #=> false
00954  *     GC.disable   #=> true
00955  *
00956  */
00957 
00958 VALUE
00959 rb_gc_disable(void)
00960 {
00961     rb_objspace_t *objspace = &rb_objspace;
00962     int old = dont_gc;
00963 
00964     dont_gc = TRUE;
00965     return old ? Qtrue : Qfalse;
00966 }
00967 
00968 VALUE rb_mGC;
00969 
00970 void
00971 rb_gc_register_mark_object(VALUE obj)
00972 {
00973     VALUE ary = GET_THREAD()->vm->mark_object_ary;
00974     rb_ary_push(ary, obj);
00975 }
00976 
00977 void
00978 rb_gc_register_address(VALUE *addr)
00979 {
00980     rb_objspace_t *objspace = &rb_objspace;
00981     struct gc_list *tmp;
00982 
00983     tmp = ALLOC(struct gc_list);
00984     tmp->next = global_List;
00985     tmp->varptr = addr;
00986     global_List = tmp;
00987 }
00988 
00989 void
00990 rb_gc_unregister_address(VALUE *addr)
00991 {
00992     rb_objspace_t *objspace = &rb_objspace;
00993     struct gc_list *tmp = global_List;
00994 
00995     if (tmp->varptr == addr) {
00996         global_List = tmp->next;
00997         xfree(tmp);
00998         return;
00999     }
01000     while (tmp->next) {
01001         if (tmp->next->varptr == addr) {
01002             struct gc_list *t = tmp->next;
01003 
01004             tmp->next = tmp->next->next;
01005             xfree(t);
01006             break;
01007         }
01008         tmp = tmp->next;
01009     }
01010 }
01011 
01012 
/*
 * Grows (or creates) the address-sorted page index so it can hold
 * next_heaps_length entries.  On allocation failure the during_gc flag
 * is cleared and NoMemoryError raised; the realloc result is only
 * committed on success, so the existing index is never lost.
 */
static void
allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
    struct sorted_heaps_slot *p;
    size_t size;

    size = next_heaps_length*sizeof(struct sorted_heaps_slot);

    if (heaps_used > 0) {
        p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size);
        if (p) objspace->heap.sorted = p;
    }
    else {
        p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size);
    }

    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }
    heaps_length = next_heaps_length;
}
01035 
/*
 * Allocates one new heap page (HEAP_SIZE bytes of RVALUE cells plus a
 * heaps_slot header), links it into the doubly linked `heaps` list,
 * inserts it into the membase-sorted objspace->heap.sorted index, and
 * threads every fresh cell onto the freelist.  Assumes the caller has
 * already ensured the sorted index has room (see add_heap_slots /
 * set_heaps_increment).
 */
static void
assign_heap_slot(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    struct heaps_slot *slot;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    /* plain malloc: must not recurse into GC while extending the heap */
    p = (RVALUE*)malloc(HEAP_SIZE);
    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }
    slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
    if (slot == 0) {
        xfree(p);
        during_gc = 0;
        rb_memerror();
    }
    MEMZERO((void*)slot, struct heaps_slot, 1);

    slot->next = heaps;
    if (heaps) heaps->prev = slot;
    heaps = slot;

    /* align the first cell to sizeof(RVALUE); if alignment eats into the
     * page's slack, one fewer object fits */
    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
        p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
        if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
            objs--;
        }
    }

    /* binary-search the insertion point in the membase-sorted index */
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = objspace->heap.sorted[mid].slot->membase;
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
        }
    }
    if (hi < heaps_used) {
        MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi);
    }
    objspace->heap.sorted[hi].slot = slot;
    objspace->heap.sorted[hi].start = p;
    objspace->heap.sorted[hi].end = (p + objs);
    heaps->membase = membase;
    heaps->slot = p;
    heaps->limit = objs;
    objspace->heap.free_num += objs;
    pend = p + objs;
    /* widen the [lomem, himem) bounds used as a fast pointer filter */
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    /* push every new cell onto the freelist as a free (flags == 0) cell */
    while (p < pend) {
        p->as.free.flags = 0;
        p->as.free.next = freelist;
        freelist = p;
        p++;
    }
}
01108 
01109 static void
01110 add_heap_slots(rb_objspace_t *objspace, size_t add)
01111 {
01112     size_t i;
01113 
01114     if ((heaps_used + add) > heaps_length) {
01115         allocate_sorted_heaps(objspace, heaps_used + add);
01116     }
01117 
01118     for (i = 0; i < add; i++) {
01119         assign_heap_slot(objspace);
01120     }
01121     heaps_inc = 0;
01122 }
01123 
/*
 * One-time objspace setup: allocates the initial heap pages, the mark
 * stack, the (optional) SIGSEGV altstack for the current thread, the
 * GC profiling clock, and the finalizer table.
 */
static void
init_heap(rb_objspace_t *objspace)
{
    add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
    init_mark_stack(&objspace->mark_stack);
#ifdef USE_SIGALTSTACK
    {
        /* altstack of another threads are allocated in another place */
        rb_thread_t *th = GET_THREAD();
        void *tmp = th->altstack;
        th->altstack = malloc(ALT_STACK_SIZE);
        free(tmp); /* free previously allocated area */
    }
#endif

    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();
}
01142 
01143 static void
01144 initial_expand_heap(rb_objspace_t *objspace)
01145 {
01146     size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;
01147 
01148     if (min_size > heaps_used) {
01149         add_heap_slots(objspace, min_size - heaps_used);
01150     }
01151 }
01152 
/*
 * Schedules heap growth: targets 1.8x the current page count (at least
 * one more page), records the number of pages still to be added in
 * heaps_inc (consumed one at a time by heaps_increment()), and makes
 * sure the sorted index can hold the target count.
 */
static void
set_heaps_increment(rb_objspace_t *objspace)
{
    size_t next_heaps_length = (size_t)(heaps_used * 1.8);

    if (next_heaps_length == heaps_used) {
        next_heaps_length++;
    }

    heaps_inc = next_heaps_length - heaps_used;

    if (next_heaps_length > heaps_length) {
        allocate_sorted_heaps(objspace, next_heaps_length);
    }
}
01168 
01169 static int
01170 heaps_increment(rb_objspace_t *objspace)
01171 {
01172     if (heaps_inc > 0) {
01173         assign_heap_slot(objspace);
01174         heaps_inc--;
01175         return TRUE;
01176     }
01177     return FALSE;
01178 }
01179 
/* Returns non-zero while a GC phase is running (the during_gc flag). */
int
rb_during_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace; /* used by the during_gc macro */
    return during_gc;
}
01186 
01187 #define RANY(o) ((RVALUE*)(o))
01188 
/*
 * Allocates one zeroed RVALUE cell from the freelist, the core object
 * allocator.  Refills the freelist via lazy sweep (which may run a full
 * GC) when empty, and optionally forces a GC first under GC.stress.
 * Allocating from inside a GC phase is a fatal bug.
 */
VALUE
rb_newobj(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE obj;

    if (UNLIKELY(during_gc)) {
        /* disable further GC so rb_bug's own allocations cannot re-trip this */
        dont_gc = 1;
        during_gc = 0;
        rb_bug("object allocation during garbage collection phase");
    }

    /* GC.stress: collect on every allocation (unless internally disabled) */
    if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
        if (!garbage_collect(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    /* freelist empty: lazy-sweep more slots (may trigger a full GC) */
    if (UNLIKELY(!freelist)) {
        if (!gc_lazy_sweep(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif
    GC_PROF_INC_LIVE_NUM;

    return obj;
}
01227 
/*
 * Allocates a fresh AST NODE of the given type with slots u1/u2/u3 set
 * to a0/a1/a2.  The cell comes zeroed from rb_newobj(), so OR-ing in
 * T_NODE establishes the full type tag.
 */
NODE*
rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
{
    NODE *n = (NODE*)rb_newobj();

    n->flags |= T_NODE;
    nd_set_type(n, type);

    n->u1.value = a0;
    n->u2.value = a1;
    n->u3.value = a2;

    return n;
}
01242 
/*
 * Public C-API constructor for an untyped T_DATA object wrapping datap,
 * with dmark/dfree callbacks invoked by the GC for marking and
 * reclamation.  klass may be 0 for a classless internal object;
 * otherwise it must be a Class.
 */
VALUE
rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    NEWOBJ(data, struct RData);
    if (klass) Check_Type(klass, T_CLASS);
    OBJSETUP(data, klass, T_DATA);
    data->data = datap;
    data->dfree = dfree;
    data->dmark = dmark;

    return (VALUE)data;
}
01255 
01256 VALUE
01257 rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
01258 {
01259     NEWOBJ(data, struct RTypedData);
01260 
01261     if (klass) Check_Type(klass, T_CLASS);
01262 
01263     OBJSETUP(data, klass, T_DATA);
01264 
01265     data->data = datap;
01266     data->typed_flag = 1;
01267     data->type = type;
01268 
01269     return (VALUE)data;
01270 }
01271 
01272 size_t
01273 rb_objspace_data_type_memsize(VALUE obj)
01274 {
01275     if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
01276         return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
01277     }
01278     else {
01279         return 0;
01280     }
01281 }
01282 
01283 const char *
01284 rb_objspace_data_type_name(VALUE obj)
01285 {
01286     if (RTYPEDDATA_P(obj)) {
01287         return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
01288     }
01289     else {
01290         return 0;
01291     }
01292 }
01293 
01294 #ifdef __ia64
01295 #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
01296 #else
01297 #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
01298 #endif
01299 
01300 #define STACK_START (th->machine_stack_start)
01301 #define STACK_END (th->machine_stack_end)
01302 #define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
01303 
01304 #if STACK_GROW_DIRECTION < 0
01305 # define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
01306 #elif STACK_GROW_DIRECTION > 0
01307 # define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
01308 #else
01309 # define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
01310                         : (size_t)(STACK_END - STACK_START + 1))
01311 #endif
01312 #if !STACK_GROW_DIRECTION
/* Cached result: +1 if the machine stack grows upward, -1 if downward. */
int ruby_stack_grow_direction;
/*
 * Runtime probe of stack growth direction (only compiled when it is not
 * known at build time): compares a fresh stack address against the
 * caller-supplied addr from a shallower frame.
 */
int
ruby_get_stack_grow_direction(volatile VALUE *addr)
{
    VALUE *end;
    SET_MACHINE_STACK_END(&end);

    if (end > addr) return ruby_stack_grow_direction = 1;
    return ruby_stack_grow_direction = -1;
}
01323 #endif
01324 
01325 /* Marking stack */
01326 
01327 static void push_mark_stack(mark_stack_t *, VALUE);
01328 static int pop_mark_stack(mark_stack_t *, VALUE *);
01329 static void shrink_stack_chunk_cache(mark_stack_t *stack);
01330 
01331 static stack_chunk_t *
01332 stack_chunk_alloc(void)
01333 {
01334     stack_chunk_t *res;
01335 
01336     res = malloc(sizeof(stack_chunk_t));
01337     if (!res)
01338         rb_memerror();
01339 
01340     return res;
01341 }
01342 
/* TRUE when the mark stack holds no chunks at all.
 * (NOTE: "stask" is a historical typo; the name is kept because callers
 * elsewhere in this file use it.) */
static inline int
is_mark_stask_empty(mark_stack_t *stack)
{
    return stack->chunk == NULL;
}
01348 
/* Returns a retired chunk to the stack's reuse cache (LIFO). */
static void
add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
{
    chunk->next = stack->cache;
    stack->cache = chunk;
    stack->cache_size++;
}
01356 
/*
 * Called after each marking phase: if more than half of the cached
 * chunks went unused this cycle, frees one of them, then resets the
 * unused counter for the next cycle.  Freeing at most one chunk per GC
 * keeps the cache shrinking gradually rather than thrashing.
 */
static void
shrink_stack_chunk_cache(mark_stack_t *stack)
{
    stack_chunk_t *chunk;

    if (stack->unused_cache_size > (stack->cache_size/2)) {
        chunk = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        free(chunk);
    }
    stack->unused_cache_size = stack->cache_size;
}
01370 
/*
 * Makes a fresh chunk the top of the mark stack, preferring a cached
 * chunk over a new allocation, and resets the in-chunk index.
 * unused_cache_size tracks the cycle's low-water mark of the cache so
 * shrink_stack_chunk_cache() can tell how much of it was actually
 * needed.
 */
static void
push_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *next;

    if (stack->cache_size > 0) {
        next = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        if (stack->unused_cache_size > stack->cache_size)
            stack->unused_cache_size = stack->cache_size;
    }
    else {
        next = stack_chunk_alloc();
    }
    next->next = stack->chunk;
    stack->chunk = next;
    stack->index = 0;
}
01390 
/*
 * Retires the (exhausted) top chunk into the cache and exposes the one
 * beneath it, which is by construction completely full, so the index is
 * reset to the chunk capacity.
 */
static void
pop_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *prev;

    prev = stack->chunk->next;
    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = prev;
    stack->index = stack->limit;
}
01401 
01402 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
01403 static void
01404 free_stack_chunks(mark_stack_t *stack)
01405 {
01406     stack_chunk_t *chunk = stack->chunk;
01407     stack_chunk_t *next = NULL;
01408 
01409     while (chunk != NULL) {
01410         next = chunk->next;
01411         free(chunk);
01412         chunk = next;
01413     }
01414 }
01415 #endif
01416 
/*
 * Pushes one VALUE onto the mark stack, opening a new chunk when the
 * current one is full (index == limit also covers the empty-stack case,
 * since init_mark_stack leaves index at limit via its first chunk).
 */
static void
push_mark_stack(mark_stack_t *stack, VALUE data)
{
    if (stack->index == stack->limit) {
        push_mark_stack_chunk(stack);
    }
    stack->chunk->data[stack->index++] = data;
}
01425 
01426 static int
01427 pop_mark_stack(mark_stack_t *stack, VALUE *data)
01428 {
01429     if (is_mark_stask_empty(stack)) {
01430         return FALSE;
01431     }
01432     if (stack->index == 1) {
01433         *data = stack->chunk->data[--stack->index];
01434         pop_mark_stack_chunk(stack);
01435         return TRUE;
01436     }
01437     *data = stack->chunk->data[--stack->index];
01438     return TRUE;
01439 }
01440 
/*
 * Sets up the mark stack: installs the first active chunk, fixes the
 * per-chunk capacity, and pre-warms the reuse cache with four chunks so
 * early GCs rarely hit malloc mid-mark.
 */
static void
init_mark_stack(mark_stack_t *stack)
{
    int i;

    push_mark_stack_chunk(stack);
    stack->limit = STACK_CHUNK_SIZE;

    for(i=0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
    }
    stack->unused_cache_size = stack->cache_size;
}
01454 
01455 
/*
 * Returns the current machine-stack depth (in VALUEs) of the running
 * thread; if p is given it receives the logically "upper" end of the
 * stack, whichever of start/end that is for this platform's growth
 * direction.
 */
size_t
ruby_stack_length(VALUE **p)
{
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;
    if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
    return STACK_LENGTH;
}
01464 
01465 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
/*
 * Non-zero when the machine stack is within water_mark VALUEs of its
 * limit (only compiled where stack overflow cannot be caught by a
 * SIGSEGV handler on an alternate stack).  On IA-64 the register
 * backing store is checked as well.
 */
static int
stack_check(int water_mark)
{
    int ret;
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;
    ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
#ifdef __ia64
    if (!ret) {
        ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
              th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;
    }
#endif
    return ret;
}
01481 #endif
01482 
01483 #define STACKFRAME_FOR_CALL_CFUNC 512
01484 
/*
 * Public probe for imminent machine-stack overflow before calling into
 * C functions.  Always 0 on platforms where overflow is instead caught
 * via SIGSEGV on an alternate signal stack.
 */
int
ruby_stack_check(void)
{
#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
    return 0;
#else
    return stack_check(STACKFRAME_FOR_CALL_CFUNC);
#endif
}
01494 
01495 #define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)
01496 
01497 static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
01498 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
01499 
/*
 * Drains the mark stack: pops each gray object and marks its children
 * (which may push more), until empty, then trims the chunk cache.
 * gc_mark() only sets FL_MARK and pushes; all recursion happens here,
 * bounded by the chunked stack rather than the C stack.
 */
static void
gc_mark_stacked_objects(rb_objspace_t *objspace)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj = 0;

    if (!mstack->index) return;
    while (pop_mark_stack(mstack, &obj)) {
        gc_mark_children(objspace, obj);
    }
    shrink_stack_chunk_cache(mstack);
}
01512 
/*
 * Conservative test used when scanning the machine stack / registers:
 * TRUE iff ptr points at an RVALUE cell inside one of our heap pages.
 * Cheap range + alignment filters first, then a binary search over the
 * membase-sorted page index.
 */
static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
    register RVALUE *p = RANY(ptr);
    register struct sorted_heaps_slot *heap;
    register size_t hi, lo, mid;

    if (p < lomem || p > himem) return FALSE;
    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;

    /* check if p looks like a pointer using bsearch*/
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        mid = (lo + hi) / 2;
        heap = &objspace->heap.sorted[mid];
        if (heap->start <= p) {
            if (p < heap->end)
                return TRUE;
            lo = mid + 1;
        }
        else {
            hi = mid;
        }
    }
    return FALSE;
}
01540 
01541 static void
01542 mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
01543 {
01544     VALUE v;
01545     while (n--) {
01546         v = *x;
01547         VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
01548         if (is_pointer_to_heap(objspace, (void *)v)) {
01549             gc_mark(objspace, v);
01550         }
01551         x++;
01552     }
01553 }
01554 
01555 static void
01556 gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
01557 {
01558     long n;
01559 
01560     if (end <= start) return;
01561     n = end - start;
01562     mark_locations_array(objspace, start, n);
01563 }
01564 
/*
 * Public wrapper over gc_mark_locations() for the default objspace.
 * (A macro of the same name redirects internal uses just below.)
 */
void
rb_gc_mark_locations(VALUE *start, VALUE *end)
{
    gc_mark_locations(&rb_objspace, start, end);
}
01570 
01571 #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
01572 
/* Closure passed through st_foreach callbacks to reach the objspace. */
struct mark_tbl_arg {
    rb_objspace_t *objspace;
};
01576 
01577 static int
01578 mark_entry(ID key, VALUE value, st_data_t data)
01579 {
01580     struct mark_tbl_arg *arg = (void*)data;
01581     gc_mark(arg->objspace, value);
01582     return ST_CONTINUE;
01583 }
01584 
01585 static void
01586 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
01587 {
01588     struct mark_tbl_arg arg;
01589     if (!tbl || tbl->num_entries == 0) return;
01590     arg.objspace = objspace;
01591     st_foreach(tbl, mark_entry, (st_data_t)&arg);
01592 }
01593 
01594 static int
01595 mark_key(VALUE key, VALUE value, st_data_t data)
01596 {
01597     struct mark_tbl_arg *arg = (void*)data;
01598     gc_mark(arg->objspace, key);
01599     return ST_CONTINUE;
01600 }
01601 
01602 static void
01603 mark_set(rb_objspace_t *objspace, st_table *tbl)
01604 {
01605     struct mark_tbl_arg arg;
01606     if (!tbl) return;
01607     arg.objspace = objspace;
01608     st_foreach(tbl, mark_key, (st_data_t)&arg);
01609 }
01610 
/* Public wrapper: marks the keys of tbl in the default objspace. */
void
rb_mark_set(st_table *tbl)
{
    mark_set(&rb_objspace, tbl);
}
01616 
01617 static int
01618 mark_keyvalue(VALUE key, VALUE value, st_data_t data)
01619 {
01620     struct mark_tbl_arg *arg = (void*)data;
01621     gc_mark(arg->objspace, key);
01622     gc_mark(arg->objspace, value);
01623     return ST_CONTINUE;
01624 }
01625 
01626 static void
01627 mark_hash(rb_objspace_t *objspace, st_table *tbl)
01628 {
01629     struct mark_tbl_arg arg;
01630     if (!tbl) return;
01631     arg.objspace = objspace;
01632     st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
01633 }
01634 
/* Public wrapper: marks keys and values of tbl in the default objspace. */
void
rb_mark_hash(st_table *tbl)
{
    mark_hash(&rb_objspace, tbl);
}
01640 
/*
 * Marks the objects reachable from a method entry: its defining class,
 * plus whatever its definition references (iseq for Ruby methods, the
 * wrapped Proc for bmethods, the attribute location for attr
 * readers/writers).  Other definition kinds hold no VALUEs.
 */
static void
mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
{
    const rb_method_definition_t *def = me->def;

    gc_mark(objspace, me->klass);
    if (!def) return;
    switch (def->type) {
      case VM_METHOD_TYPE_ISEQ:
        gc_mark(objspace, def->body.iseq->self);
        break;
      case VM_METHOD_TYPE_BMETHOD:
        gc_mark(objspace, def->body.proc);
        break;
      case VM_METHOD_TYPE_ATTRSET:
      case VM_METHOD_TYPE_IVAR:
        gc_mark(objspace, def->body.attr.location);
        break;
      default:
        break; /* ignore */
    }
}
01663 
/* Public wrapper: marks a method entry in the default objspace. */
void
rb_mark_method_entry(const rb_method_entry_t *me)
{
    mark_method_entry(&rb_objspace, me);
}
01669 
01670 static int
01671 mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
01672 {
01673     struct mark_tbl_arg *arg = (void*)data;
01674     mark_method_entry(arg->objspace, me);
01675     return ST_CONTINUE;
01676 }
01677 
01678 static void
01679 mark_m_tbl(rb_objspace_t *objspace, st_table *tbl)
01680 {
01681     struct mark_tbl_arg arg;
01682     if (!tbl) return;
01683     arg.objspace = objspace;
01684     st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
01685 }
01686 
/*
 * st_foreach callback used when tearing down a method table: frees the
 * entry unless it is still marked (i.e. referenced elsewhere).
 */
static int
free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
{
    if (!me->mark) {
        rb_free_method_entry(me);
    }
    return ST_CONTINUE;
}
01695 
/* Frees all unmarked entries of a method table, then the table itself. */
void
rb_free_m_table(st_table *tbl)
{
    st_foreach(tbl, free_method_entry_i, 0);
    st_free_table(tbl);
}
01702 
01703 static int
01704 mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
01705 {
01706     struct mark_tbl_arg *arg = (void*)data;
01707     gc_mark(arg->objspace, ce->value);
01708     return ST_CONTINUE;
01709 }
01710 
01711 static void
01712 mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
01713 {
01714     struct mark_tbl_arg arg;
01715     if (!tbl) return;
01716     arg.objspace = objspace;
01717     st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
01718 }
01719 
/* st_foreach callback: unconditionally frees a constant-table entry. */
static int
free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
{
    xfree(ce);
    return ST_CONTINUE;
}
01726 
/* Frees every entry of a constant table, then the table itself. */
void
rb_free_const_table(st_table *tbl)
{
    st_foreach(tbl, free_const_entry_i, 0);
    st_free_table(tbl);
}
01733 
/* Public wrapper: marks the values of tbl in the default objspace. */
void
rb_mark_tbl(st_table *tbl)
{
    mark_tbl(&rb_objspace, tbl);
}
01739 
01740 void
01741 rb_gc_mark_maybe(VALUE obj)
01742 {
01743     if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
01744         gc_mark(&rb_objspace, obj);
01745     }
01746 }
01747 
/*
 * Core marking entry point: sets FL_MARK on a live, not-yet-marked heap
 * object, bumps the live counter, and pushes it onto the mark stack for
 * later child traversal (see gc_mark_stacked_objects).  Special
 * constants and free cells are ignored.
 */
static void
gc_mark(rb_objspace_t *objspace, VALUE ptr)
{
    register RVALUE *obj;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    objspace->heap.live_num++;

    push_mark_stack(&objspace->mark_stack, ptr);
}
01762 
/* Public C-API: marks ptr (and, transitively, its children) as live. */
void
rb_gc_mark(VALUE ptr)
{
    gc_mark(&rb_objspace, ptr);
}
01768 
/*
 * Marks everything directly reachable from ptr, per built-in type.
 * Called only from gc_mark_stacked_objects() on objects already marked
 * by gc_mark(), hence the initial jump past the mark-bit handling.
 * Tail references use `ptr = ...; goto again;` — an explicit tail call
 * that re-runs the mark-bit check and then this switch, keeping C-stack
 * depth constant along single-child chains.
 */
static void
gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
{
    register RVALUE *obj = RANY(ptr);

    goto marking;               /* skip */

  again:
    /* same live/marked checks as gc_mark(), inlined for the tail loop */
    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    objspace->heap.live_num++;

  marking:
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(ptr);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        /* NODE cases are grouped by which of the u1/u2/u3 slots hold
         * child nodes (see the numeric comments); the last child of
         * each group is followed via the goto-again tail loop. */
        switch (nd_type(obj)) {
          case NODE_IF:         /* 1,2,3 */
          case NODE_FOR:
          case NODE_ITER:
          case NODE_WHEN:
          case NODE_MASGN:
          case NODE_RESCUE:
          case NODE_RESBODY:
          case NODE_CLASS:
          case NODE_BLOCK_PASS:
            gc_mark(objspace, (VALUE)obj->as.node.u2.node);
            /* fall through */
          case NODE_BLOCK:      /* 1,3 */
          case NODE_OPTBLOCK:
          case NODE_ARRAY:
          case NODE_DSTR:
          case NODE_DXSTR:
          case NODE_DREGX:
          case NODE_DREGX_ONCE:
          case NODE_ENSURE:
          case NODE_CALL:
          case NODE_DEFS:
          case NODE_OP_ASGN1:
          case NODE_ARGS:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node);
            /* fall through */
          case NODE_SUPER:      /* 3 */
          case NODE_FCALL:
          case NODE_DEFN:
          case NODE_ARGS_AUX:
            ptr = (VALUE)obj->as.node.u3.node;
            goto again;

          case NODE_WHILE:      /* 1,2 */
          case NODE_UNTIL:
          case NODE_AND:
          case NODE_OR:
          case NODE_CASE:
          case NODE_SCLASS:
          case NODE_DOT2:
          case NODE_DOT3:
          case NODE_FLIP2:
          case NODE_FLIP3:
          case NODE_MATCH2:
          case NODE_MATCH3:
          case NODE_OP_ASGN_OR:
          case NODE_OP_ASGN_AND:
          case NODE_MODULE:
          case NODE_ALIAS:
          case NODE_VALIAS:
          case NODE_ARGSCAT:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node);
            /* fall through */
          case NODE_GASGN:      /* 2 */
          case NODE_LASGN:
          case NODE_DASGN:
          case NODE_DASGN_CURR:
          case NODE_IASGN:
          case NODE_IASGN2:
          case NODE_CVASGN:
          case NODE_COLON3:
          case NODE_OPT_N:
          case NODE_EVSTR:
          case NODE_UNDEF:
          case NODE_POSTEXE:
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_HASH:       /* 1 */
          case NODE_LIT:
          case NODE_STR:
          case NODE_XSTR:
          case NODE_DEFINED:
          case NODE_MATCH:
          case NODE_RETURN:
          case NODE_BREAK:
          case NODE_NEXT:
          case NODE_YIELD:
          case NODE_COLON2:
          case NODE_SPLAT:
          case NODE_TO_ARY:
            ptr = (VALUE)obj->as.node.u1.node;
            goto again;

          case NODE_SCOPE:      /* 2,3 */
          case NODE_CDECL:
          case NODE_OPT_ARG:
            gc_mark(objspace, (VALUE)obj->as.node.u3.node);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_ZARRAY:     /* - */
          case NODE_ZSUPER:
          case NODE_VCALL:
          case NODE_GVAR:
          case NODE_LVAR:
          case NODE_DVAR:
          case NODE_IVAR:
          case NODE_CVAR:
          case NODE_NTH_REF:
          case NODE_BACK_REF:
          case NODE_REDO:
          case NODE_RETRY:
          case NODE_SELF:
          case NODE_NIL:
          case NODE_TRUE:
          case NODE_FALSE:
          case NODE_ERRINFO:
          case NODE_BLOCK_ARG:
            break;
          case NODE_ALLOCA:
            /* u1 points at a buffer of u3.cnt VALUEs; scan it conservatively */
            mark_locations_array(objspace,
                                 (VALUE*)obj->as.node.u1.value,
                                 obj->as.node.u3.cnt);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          default:              /* unlisted NODE */
            /* unknown layout: treat each slot as a possible heap pointer */
            if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u1.node);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u2.node);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u3.node);
            }
        }
        return;                 /* no need to mark class. */
    }

    gc_mark(objspace, obj->as.basic.klass);
    switch (BUILTIN_TYPE(obj)) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
        mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        mark_tbl(objspace, RCLASS_IV_TBL(obj));
        mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
        ptr = RCLASS_SUPER(obj);
        goto again;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            /* shared array: only the owner of the buffer needs marking */
            ptr = obj->as.array.as.heap.aux.shared;
            goto again;
        }
        else {
            long i, len = RARRAY_LEN(obj);
            VALUE *ptr = RARRAY_PTR(obj); /* NOTE: shadows the parameter */
            for (i=0; i < len; i++) {
                gc_mark(objspace, *ptr++);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj->as.hash.ntbl);
        ptr = obj->as.hash.ifnone;
        goto again;

      case T_STRING:
#define STR_ASSOC FL_USER3   /* copied from string.c */
        if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
            ptr = obj->as.string.as.heap.aux.shared;
            goto again;
        }
        break;

      case T_DATA:
        if (RTYPEDDATA_P(obj)) {
            RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
            if (mark_func) (*mark_func)(DATA_PTR(obj));
        }
        else {
            if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
        }
        break;

      case T_OBJECT:
        {
            long i, len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj); /* NOTE: shadows the parameter */
            for (i  = 0; i < len; i++) {
                gc_mark(objspace, *ptr++);
            }
        }
        break;

      case T_FILE:
        if (obj->as.file.fptr) {
            gc_mark(objspace, obj->as.file.fptr->pathv);
            gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
            gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
            gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
            gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
            gc_mark(objspace, obj->as.file.fptr->write_lock);
        }
        break;

      case T_REGEXP:
        ptr = obj->as.regexp.src;
        goto again;

      case T_FLOAT:
      case T_BIGNUM:
      case T_ZOMBIE:
        break; /* no VALUE children */

      case T_MATCH:
        gc_mark(objspace, obj->as.match.regexp);
        if (obj->as.match.str) {
            ptr = obj->as.match.str;
            goto again;
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, obj->as.rational.num);
        ptr = obj->as.rational.den;
        goto again;

      case T_COMPLEX:
        gc_mark(objspace, obj->as.complex.real);
        ptr = obj->as.complex.imag;
        goto again;

      case T_STRUCT:
        {
            long len = RSTRUCT_LEN(obj);
            VALUE *ptr = RSTRUCT_PTR(obj); /* NOTE: shadows the parameter */

            while (len--) {
                gc_mark(objspace, *ptr++);
            }
        }
        break;

      default:
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)obj,
               is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
    }
}
02040 
02041 static int obj_free(rb_objspace_t *, VALUE);
02042 
/*
 * Returns a dead cell to the freelist head, clearing its flags so it
 * reads as a free cell (flags == 0) to the marking code.
 */
static inline void
add_freelist(rb_objspace_t *objspace, RVALUE *p)
{
    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
    p->as.free.flags = 0;
    p->as.free.next = freelist;
    freelist = p;
}
02051 
/*
 * Runs finalizers over a chain of deferred-finalization cells.  After
 * finalizing, a normal cell is either returned to the freelist or, if a
 * lazy sweep is still in progress, merely cleared so the sweeper
 * handles it.  FL_SINGLETON-flagged cells belong to a page being torn
 * down: their dmark field appears to carry the owning heaps_slot
 * pointer (NOTE(review): set by the freeing path elsewhere in this
 * file — confirm), whose live count is decremented instead.
 */
static void
finalize_list(rb_objspace_t *objspace, RVALUE *p)
{
    while (p) {
        RVALUE *tmp = p->as.free.next;
        run_final(objspace, (VALUE)p);
        if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
            if (objspace->heap.sweep_slots) {
                p->as.free.flags = 0;
            }
            else {
                GC_PROF_DEC_LIVE_NUM;
                add_freelist(objspace, p);
            }
        }
        else {
            struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
            slot->limit--;
        }
        p = tmp;
    }
}
02074 
/*
 * Detaches a heap page from the doubly linked `heaps` list, fixing the
 * list head and the lazy-sweep cursor if either pointed at it.
 */
static void
unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
{
    if (slot->prev)
        slot->prev->next = slot->next;
    if (slot->next)
        slot->next->prev = slot->prev;
    if (heaps == slot)
        heaps = slot->next;
    if (objspace->heap.sweep_slots == slot)
        objspace->heap.sweep_slots = slot->next;
    slot->prev = NULL;
    slot->next = NULL;
}
02089 
02090 
/*
 * Releases fully emptied heap pages (limit == 0), compacting the
 * sorted index in place with read cursor i and write cursor j.  One
 * freed page's memory is held back in heaps_freed (preferring the
 * lowest address) rather than returned to the OS, presumably as a
 * lazy-sweep safety margin — NOTE(review): confirm against the sweep
 * code that consumes heaps_freed.  Scanning starts at index 1, so the
 * first page is never reclaimed here.
 */
static void
free_unused_heaps(rb_objspace_t *objspace)
{
    size_t i, j;
    RVALUE *last = 0;

    for (i = j = 1; j < heaps_used; i++) {
        if (objspace->heap.sorted[i].slot->limit == 0) {
            if (!last) {
                last = objspace->heap.sorted[i].slot->membase;
            }
            else {
                free(objspace->heap.sorted[i].slot->membase);
            }
            free(objspace->heap.sorted[i].slot);
            heaps_used--;
        }
        else {
            if (i != j) {
                objspace->heap.sorted[j] = objspace->heap.sorted[i];
            }
            j++;
        }
    }
    if (last) {
        if (last < heaps_freed) {
            free(heaps_freed);
            heaps_freed = last;
        }
        else {
            free(last);
        }
    }
}
02125 
/* Sweep one heap page: unmarked live objects are either freed immediately
 * (onto the freelist) or, if they need finalization / deferred destruction,
 * chained onto deferred_final_list as T_ZOMBIEs.  Marked objects get their
 * mark bit cleared for the next cycle.  If the whole page turns out dead and
 * we already have enough free slots, the page is unlinked for release. */
static void
slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
{
    size_t free_num = 0, final_num = 0;
    RVALUE *p, *pend;
    /* snapshots taken so the page can be "cancelled" below: everything this
     * call prepended to freelist / deferred_final_list lies before them */
    RVALUE *free = freelist, *final = deferred_final_list;
    int deferred;

    p = sweep_slot->slot; pend = p + sweep_slot->limit;
    while (p < pend) {
        if (!(p->as.basic.flags & FL_MARK)) {
            /* unmarked: dead unless already an empty (flags==0) cell */
            if (p->as.basic.flags &&
                ((deferred = obj_free(objspace, (VALUE)p)) ||
                 (FL_TEST(p, FL_FINALIZE)))) {
                if (!deferred) {
                    /* finalizer-only object: turn it into a zombie here */
                    p->as.free.flags = T_ZOMBIE;
                    RDATA(p)->dfree = 0;
                }
                /* FL_MARK keeps the zombie alive until finalize_list runs */
                p->as.free.flags |= FL_MARK;
                p->as.free.next = deferred_final_list;
                deferred_final_list = p;
                final_num++;
            }
            else {
                add_freelist(objspace, p);
                free_num++;
            }
        }
        else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
            /* objects to be finalized */
            /* do nothing remain marked */
        }
        else {
            RBASIC(p)->flags &= ~FL_MARK;
        }
        p++;
    }
    if (final_num + free_num == sweep_slot->limit &&
        objspace->heap.free_num > objspace->heap.do_heap_free) {
        /* entire page dead and free slots are plentiful: schedule the page
         * itself for release once its zombies are finalized */
        RVALUE *pp;

        for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
            /* stash owning page in dmark for finalize_list's bookkeeping */
            RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
            pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
        }
        sweep_slot->limit = final_num;
        freelist = free;        /* cancel this page from freelist */
        unlink_heap_slot(objspace, sweep_slot);
    }
    else {
        objspace->heap.free_num += free_num;
    }
    objspace->heap.final_num += final_num;

    if (deferred_final_list && !finalizing) {
        /* ask the VM to run pending finalizers at a safe point */
        rb_thread_t *th = GET_THREAD();
        if (th) {
            RUBY_VM_SET_FINALIZER_INTERRUPT(th);
        }
    }
}
02187 
02188 static int
02189 ready_to_gc(rb_objspace_t *objspace)
02190 {
02191     if (dont_gc || during_gc) {
02192         if (!freelist) {
02193             if (!heaps_increment(objspace)) {
02194                 set_heaps_increment(objspace);
02195                 heaps_increment(objspace);
02196             }
02197         }
02198         return FALSE;
02199     }
02200     return TRUE;
02201 }
02202 
/* Prepare the sweep phase: reset the freelist, recompute the heap-shrink
 * (do_heap_free) and heap-grow (free_min) thresholds from current capacity,
 * and point the lazy-sweep cursor at the first page. */
static void
before_gc_sweep(rb_objspace_t *objspace)
{
    freelist = 0;
    /* shrink when more than 65% of capacity is free; grow when fewer than
     * 20% of slots end up free after the sweep */
    objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
    objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT)  * 0.2);
    if (objspace->heap.free_min < initial_free_min) {
        /* heap too small for the percentages to make sense: never shrink,
         * and keep the configured minimum free count */
        objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
        objspace->heap.free_min = initial_free_min;
    }
    objspace->heap.sweep_slots = heaps;
    objspace->heap.free_num = 0;

    /* sweep unlinked method entries */
    if (GET_VM()->unlinked_method_entry_list) {
        rb_sweep_method_entry(GET_VM());
    }
}
02221 
/* Finish the sweep phase: grow the heap if the sweep did not free enough
 * slots, adapt malloc_limit to the observed allocation pressure, and release
 * completely empty pages. */
static void
after_gc_sweep(rb_objspace_t *objspace)
{
    GC_PROF_SET_MALLOC_INFO;

    if (objspace->heap.free_num < objspace->heap.free_min) {
        set_heaps_increment(objspace);
        heaps_increment(objspace);
    }

    if (malloc_increase > malloc_limit) {
        /* raise the limit proportionally to how "live" the heap is, so a
         * mostly-live heap tolerates more malloc'd memory between GCs */
        malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)objspace->heap.live_num / (heaps_used * HEAP_OBJ_LIMIT));
        if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
    }
    malloc_increase = 0;

    free_unused_heaps(objspace);
}
02240 
/* Sweep pages one at a time until at least one free slot appears.
 * Returns TRUE as soon as the freelist is non-empty (allocation can
 * proceed), FALSE when every remaining page was swept without producing
 * a free slot. */
static int
lazy_sweep(rb_objspace_t *objspace)
{
    struct heaps_slot *next;

    heaps_increment(objspace);  /* take any pending heap growth first */
    while (objspace->heap.sweep_slots) {
        next = objspace->heap.sweep_slots->next;  /* slot_sweep may unlink */
        slot_sweep(objspace, objspace->heap.sweep_slots);
        objspace->heap.sweep_slots = next;
        if (freelist) {
            during_gc = 0;
            return TRUE;
        }
    }
    return FALSE;
}
02258 
02259 static void
02260 rest_sweep(rb_objspace_t *objspace)
02261 {
02262     if (objspace->heap.sweep_slots) {
02263        while (objspace->heap.sweep_slots) {
02264            lazy_sweep(objspace);
02265        }
02266        after_gc_sweep(objspace);
02267     }
02268 }
02269 
02270 static void gc_marks(rb_objspace_t *objspace);
02271 
/* Incremental collection entry point used by the allocator.  First tries to
 * satisfy the request by continuing an in-progress sweep or by growing the
 * heap; only if neither yields a free slot does it run a full mark followed
 * by a fresh (lazy) sweep.  Returns TRUE when a free slot is available. */
static int
gc_lazy_sweep(rb_objspace_t *objspace)
{
    int res;
    INIT_GC_PROF_PARAMS;

    if (objspace->flags.dont_lazy_sweep)
        return garbage_collect(objspace);  /* lazy sweep disabled: full GC */


    if (!ready_to_gc(objspace)) return TRUE;

    during_gc++;
    GC_PROF_TIMER_START;
    GC_PROF_SWEEP_TIMER_START;

    if (objspace->heap.sweep_slots) {
        /* a sweep is already in progress: continue it */
        res = lazy_sweep(objspace);
        if (res) {
            GC_PROF_SWEEP_TIMER_STOP;
            GC_PROF_SET_MALLOC_INFO;
            GC_PROF_TIMER_STOP(Qfalse);
            return res;
        }
        after_gc_sweep(objspace);
    }
    else {
        /* no sweep pending: cheap path, just grow the heap if allowed */
        if (heaps_increment(objspace)) {
            during_gc = 0;
            return TRUE;
        }
    }

    /* nothing free anywhere: run a full mark phase */
    gc_marks(objspace);

    before_gc_sweep(objspace);
    if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace->heap.live_num)) {
        /* even a perfect sweep cannot free free_min slots: schedule growth */
        set_heaps_increment(objspace);
    }

    GC_PROF_SWEEP_TIMER_START;
    if(!(res = lazy_sweep(objspace))) {
        after_gc_sweep(objspace);
        if(freelist) {
            res = TRUE;
            during_gc = 0;
        }
    }
    GC_PROF_SWEEP_TIMER_STOP;

    GC_PROF_TIMER_STOP(Qtrue);
    return res;
}
02325 
02326 static void
02327 gc_sweep(rb_objspace_t *objspace)
02328 {
02329     struct heaps_slot *next;
02330 
02331     before_gc_sweep(objspace);
02332 
02333     while (objspace->heap.sweep_slots) {
02334         next = objspace->heap.sweep_slots->next;
02335         slot_sweep(objspace, objspace->heap.sweep_slots);
02336         objspace->heap.sweep_slots = next;
02337     }
02338 
02339     after_gc_sweep(objspace);
02340 
02341     during_gc = 0;
02342 }
02343 
/* Immediately recycle an object the caller guarantees is unreachable.
 * A marked object cannot be put on the freelist (the sweep would double-add
 * it), so it is only flagged empty and left for the next sweep. */
void
rb_gc_force_recycle(VALUE p)
{
    rb_objspace_t *objspace = &rb_objspace;
    GC_PROF_DEC_LIVE_NUM;
    if (RBASIC(p)->flags & FL_MARK) {
        RANY(p)->as.free.flags = 0;
    }
    else {
        add_freelist(objspace, (RVALUE *)p);
    }
}
02356 
/* Retype an object as T_ZOMBIE (keeping its other flag bits) so its actual
 * destruction is deferred to the finalizer phase. */
static inline void
make_deferred(RVALUE *p)
{
    p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
}
02362 
/* Defer destruction of a T_FILE: repackage it as a zombie whose dfree closes
 * the rb_io_t.  The fptr must be saved before make_deferred(), because the
 * file and data variants overlay the same RVALUE union storage. */
static inline void
make_io_deferred(RVALUE *p)
{
    rb_io_t *fptr = p->as.file.fptr;
    make_deferred(p);
    p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
    p->as.data.data = fptr;
}
02371 
/* Release the resources owned by a dead object, dispatching on its type.
 * Returns 1 when destruction was deferred (the object became a T_ZOMBIE and
 * must be finished later: T_DATA with a dfree function, T_FILE), 0 when the
 * slot can be reused immediately. */
static int
obj_free(rb_objspace_t *objspace, VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        /* immediates never live on the heap; reaching here means corruption */
        rb_bug("obj_free() called for broken object");
        break;
    }

    /* drop generic (external-table) instance variables first */
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar((VALUE)obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        /* free the out-of-line ivar array unless ivars are embedded */
        if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
            RANY(obj)->as.object.as.heap.ivptr) {
            xfree(RANY(obj)->as.object.as.heap.ivptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_clear_cache_by_class((VALUE)obj);
        rb_free_m_table(RCLASS_M_TBL(obj));
        if (RCLASS_IV_TBL(obj)) {
            st_free_table(RCLASS_IV_TBL(obj));
        }
        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_IV_INDEX_TBL(obj)) {
            st_free_table(RCLASS_IV_INDEX_TBL(obj));
        }
        xfree(RANY(obj)->as.klass.ptr);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
        if (RANY(obj)->as.hash.ntbl) {
            st_free_table(RANY(obj)->as.hash.ntbl);
        }
        break;
      case T_REGEXP:
        if (RANY(obj)->as.regexp.ptr) {
            onig_free(RANY(obj)->as.regexp.ptr);
        }
        break;
      case T_DATA:
        if (DATA_PTR(obj)) {
            if (RTYPEDDATA_P(obj)) {
                /* copy the typed-data dfree into the plain slot so the
                 * deferred finalizer can reach it uniformly */
                RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
            }
            if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
                /* RUBY_DEFAULT_FREE sentinel: plain xfree, no deferral */
                xfree(DATA_PTR(obj));
            }
            else if (RANY(obj)->as.data.dfree) {
                /* user dfree may run arbitrary code: defer it */
                make_deferred(RANY(obj));
                return 1;
            }
        }
        break;
      case T_MATCH:
        if (RANY(obj)->as.match.rmatch) {
            struct rmatch *rm = RANY(obj)->as.match.rmatch;
            onig_region_free(&rm->regs, 0);
            if (rm->char_offset)
                xfree(rm->char_offset);
            xfree(rm);
        }
        break;
      case T_FILE:
        if (RANY(obj)->as.file.fptr) {
            /* closing an fd can block / raise: always deferred */
            make_io_deferred(RANY(obj));
            return 1;
        }
        break;
      case T_RATIONAL:
      case T_COMPLEX:
        break;
      case T_ICLASS:
        /* iClass shares table with the module */
        xfree(RANY(obj)->as.klass.ptr);
        break;

      case T_FLOAT:
        break;

      case T_BIGNUM:
        if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
            xfree(RBIGNUM_DIGITS(obj));
        }
        break;
      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_SCOPE:
            if (RANY(obj)->as.node.u1.tbl) {
                xfree(RANY(obj)->as.node.u1.tbl);
            }
            break;
          case NODE_ALLOCA:
            xfree(RANY(obj)->as.node.u1.node);
            break;
        }
        break;                  /* no need to free iv_tbl */

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
            RANY(obj)->as.rstruct.as.heap.ptr) {
            xfree(RANY(obj)->as.rstruct.as.heap.ptr);
        }
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p)",
               BUILTIN_TYPE(obj), (void*)obj);
    }

    return 0;
}
02499 
/* Set to 1 to print a message at the start/end of each collection. */
#define GC_NOTIFY 0

/* Compute the [start, end) range of the machine stack to scan, ordered
 * low-to-high regardless of the platform's stack growth direction.  When
 * the direction is unknown at compile time it is decided at runtime by
 * comparing the two addresses.  `appendix` widens the upper bound by a few
 * words for upward-growing stacks. */
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#else
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
#endif

/* Element count of a true array (do not use on pointers). */
#define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))
02513 
/* Conservatively mark everything reachable from the current thread's CPU
 * context: setjmp spills the callee-saved registers into a buffer on the
 * stack, which is then scanned as an array of potential VALUEs, followed by
 * the machine stack itself. */
static void
mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
{
    union {
        rb_jmp_buf j;
        VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];  /* reinterpret as words */
    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;

    FLUSH_REGISTER_WINDOWS;
    /* This assumes that all registers are saved into the jmp_buf (and stack) */
    rb_setjmp(save_regs_gc_mark.j);

    SET_STACK_END;
    GET_STACK_BOUNDS(stack_start, stack_end, 1);

    mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));

    rb_gc_mark_locations(stack_start, stack_end);
#ifdef __ia64
    /* ia64 keeps a second, register backing-store stack */
    rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
#endif
#if defined(__mc68000__)
    mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
                         (STACK_START - STACK_END));
#endif
}
02541 
/* Full mark phase: mark every GC root (VM, finalizer table, machine context,
 * symbols, encodings, global variables, class table, parser, method entries)
 * and then drain the mark stack to reach the transitive closure. */
static void
gc_marks(rb_objspace_t *objspace)
{
    struct gc_list *list;
    rb_thread_t *th = GET_THREAD();
    GC_PROF_MARK_TIMER_START;

    /* live_num is recounted from scratch during marking */
    objspace->heap.live_num = 0;
    objspace->count++;


    SET_STACK_END;

    th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);

    mark_tbl(objspace, finalizer_table);
    mark_current_machine_context(objspace, th);

    rb_gc_mark_symbols();
    rb_gc_mark_encodings();

    /* mark protected global variables */
    for (list = global_List; list; list = list->next) {
        rb_gc_mark_maybe(*list->varptr);
    }
    rb_mark_end_proc();
    rb_gc_mark_global_tbl();

    mark_tbl(objspace, rb_class_tbl);

    /* mark generic instance variables for special constants */
    rb_mark_generic_ivar_tbl();

    rb_gc_mark_parser();

    rb_gc_mark_unlinked_live_method_entries(th->vm);

    /* marking-loop */
    gc_mark_stacked_objects(objspace);

    GC_PROF_MARK_TIMER_STOP;
}
02584 
/* Run one full, non-lazy collection (mark + complete sweep).  Returns FALSE
 * only when the heap has not been initialized yet; otherwise TRUE, even when
 * GC is disabled (ready_to_gc then grows the heap instead). */
static int
garbage_collect(rb_objspace_t *objspace)
{
    INIT_GC_PROF_PARAMS;

    if (GC_NOTIFY) printf("start garbage_collect()\n");

    if (!heaps) {
        return FALSE;
    }
    if (!ready_to_gc(objspace)) {
        return TRUE;
    }

    GC_PROF_TIMER_START;

    /* finish any outstanding lazy sweep before remarking */
    rest_sweep(objspace);

    during_gc++;
    gc_marks(objspace);

    GC_PROF_SWEEP_TIMER_START;
    gc_sweep(objspace);
    GC_PROF_SWEEP_TIMER_STOP;

    GC_PROF_TIMER_STOP(Qtrue);
    if (GC_NOTIFY) printf("end garbage_collect()\n");
    return TRUE;
}
02614 
/* Public C API: trigger a full collection on the global objspace. */
int
rb_garbage_collect(void)
{
    return garbage_collect(&rb_objspace);
}
02620 
/* Conservatively mark the current machine stack on behalf of thread `th`
 * (used when marking threads other than the one running the GC). */
void
rb_gc_mark_machine_stack(rb_thread_t *th)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE *stack_start, *stack_end;

    GET_STACK_BOUNDS(stack_start, stack_end, 0);
    rb_gc_mark_locations(stack_start, stack_end);
#ifdef __ia64
    rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
#endif
}
02633 
02634 
02635 /*
02636  *  call-seq:
02637  *     GC.start                     -> nil
02638  *     gc.garbage_collect           -> nil
02639  *     ObjectSpace.garbage_collect  -> nil
02640  *
02641  *  Initiates garbage collection, unless manually disabled.
02642  *
02643  */
02644 
VALUE
rb_gc_start(void)
{
    /* Ruby-level GC.start: run a full collection, always returns nil. */
    rb_gc();
    return Qnil;
}
02651 
#undef Init_stack

/* Legacy entry point kept for extensions: record `addr` as the approximate
 * base of the machine stack for conservative scanning. */
void
Init_stack(volatile VALUE *addr)
{
    ruby_init_stack(addr);
}
02659 
02660 /*
02661  * Document-class: ObjectSpace
02662  *
02663  *  The <code>ObjectSpace</code> module contains a number of routines
02664  *  that interact with the garbage collection facility and allow you to
02665  *  traverse all living objects with an iterator.
02666  *
02667  *  <code>ObjectSpace</code> also provides support for object
02668  *  finalizers, procs that will be called when a specific object is
02669  *  about to be destroyed by garbage collection.
02670  *
02671  *     include ObjectSpace
02672  *
02673  *
02674  *     a = "A"
02675  *     b = "B"
02676  *     c = "C"
02677  *
02678  *
02679  *     define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
02680  *     define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
02681  *     define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
02682  *
02683  *  <em>produces:</em>
02684  *
02685  *     Finalizer three on 537763470
02686  *     Finalizer one on 537763480
02687  *     Finalizer two on 537763480
02688  *
02689  */
02690 
/* Allocate and set up the initial object heap for the global objspace. */
void
Init_heap(void)
{
    init_heap(&rb_objspace);
}
02696 
/* rb_ensure() handler: re-enable lazy sweeping after an iteration that had
 * to disable it (see rb_objspace_each_objects). */
static VALUE
lazy_sweep_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->flags.dont_lazy_sweep = FALSE;
    return Qnil;
}
02705 
/* Callback signature for rb_objspace_each_objects:
 * (vstart, vend, stride, data); return non-zero to stop the walk. */
typedef int each_obj_callback(void *, void *, size_t, void *);

/* Bundles the callback and its user data for passing through rb_ensure. */
struct each_obj_args {
    each_obj_callback *callback;
    void *data;
};
02712 
/* Walk every heap page, invoking the user callback once per page with the
 * range [first-live-object, page-end).  The index dance at the top re-locates
 * the current page by membase address each iteration, so the walk survives
 * the sorted page table being reshuffled by the callback.  `v` is volatile
 * to pin the first live object of the page against collection. */
static VALUE
objspace_each_objects(VALUE arg)
{
    size_t i;
    RVALUE *membase = 0;
    RVALUE *pstart, *pend;
    rb_objspace_t *objspace = &rb_objspace;
    struct each_obj_args *args = (struct each_obj_args *)arg;
    volatile VALUE v;

    i = 0;
    while (i < heaps_used) {
        /* re-seek to the first page whose membase is above the last one
         * visited; sorted[] may have changed since the previous iteration */
        while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
            i--;
        while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
            i++;
        if (heaps_used <= i)
          break;
        membase = objspace->heap.sorted[i].slot->membase;

        pstart = objspace->heap.sorted[i].slot->slot;
        pend = pstart + objspace->heap.sorted[i].slot->limit;

        /* advance pstart to the first live (non-zero flags) object */
        for (; pstart != pend; pstart++) {
            if (pstart->as.basic.flags) {
                v = (VALUE)pstart; /* acquire to save this object */
                break;
            }
        }
        if (pstart != pend) {
            if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
                break;  /* non-zero return stops the iteration */
            }
        }
    }

    return Qnil;
}
02751 
02752 /*
02753  * rb_objspace_each_objects() is special C API to walk through
02754  * Ruby object space.  This C API is too difficult to use it.
02755  * To be frank, you should not use it. Or you need to read the
02756  * source code of this function and understand what this function does.
02757  *
02758  * 'callback' will be called several times (the number of heap slot,
02759  * at current implementation) with:
02760  *   vstart: a pointer to the first living object of the heap_slot.
02761  *   vend: a pointer to next to the valid heap_slot area.
02762  *   stride: a distance to next VALUE.
02763  *
02764  * If callback() returns non-zero, the iteration will be stopped.
02765  *
02766  * This is a sample callback code to iterate liveness objects:
02767  *
 *   int
 *   sample_callback(void *vstart, void *vend, size_t stride, void *data) {
 *     VALUE v = (VALUE)vstart;
 *     for (; v != (VALUE)vend; v += stride) {
 *       if (RBASIC(v)->flags) { /* liveness check */
 *         /* do something with live object 'v' */
 *       }
 *     }
 *     return 0; /* continue the iteration */
 *   }
02777  *
02778  * Note: 'vstart' is not a top of heap_slot.  This point the first
02779  *       living object to grasp at least one object to avoid GC issue.
02780  *       This means that you can not walk through all Ruby object slot
02781  *       including freed object slot.
02782  *
02783  * Note: On this implementation, 'stride' is same as sizeof(RVALUE).
02784  *       However, there are possibilities to pass variable values with
02785  *       'stride' with some reasons.  You must use stride instead of
02786  *       use some constant value in the iteration.
02787  */
void
rb_objspace_each_objects(each_obj_callback *callback, void *data)
{
    struct each_obj_args args;
    rb_objspace_t *objspace = &rb_objspace;

    /* get the heap into a fully swept state, and keep lazy sweeping off
     * during the walk so pages do not change underneath the callback;
     * rb_ensure re-enables it even if the callback raises */
    rest_sweep(objspace);
    objspace->flags.dont_lazy_sweep = TRUE;

    args.callback = callback;
    args.data = data;
    rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
}
02801 
/* State for ObjectSpace.each_object: running count and optional class/module
 * filter (0 means "no filter"). */
struct os_each_struct {
    size_t num;
    VALUE of;
};
02806 
/* rb_objspace_each_objects callback for ObjectSpace.each_object: yield each
 * live, user-visible object (optionally filtered by kind_of oes->of) and
 * count the yields.  Internal types and hidden objects are skipped. */
static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;
    RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
    volatile VALUE v;  /* pins the object across the yield */

    for (; p != pend; p++) {
        if (p->as.basic.flags) {
            switch (BUILTIN_TYPE(p)) {
              case T_NONE:
              case T_ICLASS:
              case T_NODE:
              case T_ZOMBIE:
                continue;  /* internal-only types are never exposed */
              case T_CLASS:
                if (FL_TEST(p, FL_SINGLETON))
                  continue;
                /* fall through: non-singleton classes are yielded */
              default:
                if (!p->as.basic.klass) continue;  /* hidden object */
                v = (VALUE)p;
                if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;  /* never stop early */
}
02838 
/* Drive os_obj_of_i over the whole objspace; returns the number of objects
 * yielded, as a Ruby Integer. */
static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}
02849 
02850 /*
02851  *  call-seq:
02852  *     ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
02853  *     ObjectSpace.each_object([module])              -> an_enumerator
02854  *
02855  *  Calls the block once for each living, nonimmediate object in this
02856  *  Ruby process. If <i>module</i> is specified, calls the block
02857  *  for only those classes or modules that match (or are a subclass of)
02858  *  <i>module</i>. Returns the number of objects found. Immediate
02859  *  objects (<code>Fixnum</code>s, <code>Symbol</code>s
02860  *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
02861  *  never returned. In the example below, <code>each_object</code>
02862  *  returns both the numbers we defined and several constants defined in
02863  *  the <code>Math</code> module.
02864  *
02865  *  If no block is given, an enumerator is returned instead.
02866  *
02867  *     a = 102.7
02868  *     b = 95       # Won't be returned
02869  *     c = 12345678987654321
02870  *     count = ObjectSpace.each_object(Numeric) {|x| p x }
02871  *     puts "Total count: #{count}"
02872  *
02873  *  <em>produces:</em>
02874  *
02875  *     12345678987654321
02876  *     102.7
02877  *     2.71828182845905
02878  *     3.14159265358979
02879  *     2.22044604925031e-16
02880  *     1.7976931348623157e+308
02881  *     2.2250738585072e-308
02882  *     Total count: 7
02883  *
02884  */
02885 
/* ObjectSpace.each_object([module]) implementation: parse the optional
 * filter, return an enumerator when no block is given, else iterate. */
static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    rb_secure(4);  /* walking the whole objspace is privileged */
    if (argc == 0) {
        of = 0;
    }
    else {
        rb_scan_args(argc, argv, "01", &of);
    }
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}
02901 
02902 /*
02903  *  call-seq:
02904  *     ObjectSpace.undefine_finalizer(obj)
02905  *
02906  *  Removes all finalizers for <i>obj</i>.
02907  *
02908  */
02909 
/* ObjectSpace.undefine_finalizer(obj): drop all finalizers registered for
 * obj and clear its FL_FINALIZE flag.  Returns obj. */
static VALUE
undefine_final(VALUE os, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;  /* used via finalizer_table macro */
    st_data_t data = obj;
    rb_check_frozen(obj);
    st_delete(finalizer_table, &data, 0);
    FL_UNSET(obj, FL_FINALIZE);
    return obj;
}
02920 
02921 /*
02922  *  call-seq:
02923  *     ObjectSpace.define_finalizer(obj, aProc=proc())
02924  *
02925  *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
02926  *  was destroyed.
02927  *
02928  */
02929 
/* ObjectSpace.define_finalizer(obj, aProc=proc()): register a callable to
 * run when obj is collected.  Finalizers for one object accumulate in an
 * array stored in finalizer_table; each entry is a frozen
 * [safe_level, callable] pair.  Returns that pair. */
static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;  /* used via finalizer_table macro */
    VALUE obj, block, table;
    st_data_t data;

    rb_scan_args(argc, argv, "11", &obj, &block);
    rb_check_frozen(obj);
    if (argc == 1) {
        block = rb_block_proc();  /* no explicit proc: take the block */
    }
    else if (!rb_respond_to(block, rb_intern("call"))) {
        rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
                 rb_obj_classname(block));
    }
    if (!FL_ABLE(obj)) {
        /* immediates have no flags and therefore cannot be finalized */
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    RBASIC(obj)->flags |= FL_FINALIZE;

    /* record the safe level so the finalizer runs with the same privileges */
    block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
    OBJ_FREEZE(block);

    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;
        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(1, block);
        RBASIC(table)->klass = 0;  /* hide the table from ObjectSpace */
        st_add_direct(finalizer_table, obj, table);
    }
    return block;
}
02966 
/* Copy obj's finalizer registration to dest (used by dup/clone).  Note the
 * finalizer array itself is shared, not duplicated. */
void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;  /* used via finalizer_table macro */
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;
    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;
        st_insert(finalizer_table, dest, table);
    }
    FL_SET(dest, FL_FINALIZE);
}
02981 
/* rb_protect body: invoke one finalizer.  args = {callable, argv array,
 * safe level}. */
static VALUE
run_single_final(VALUE arg)
{
    VALUE *args = (VALUE *)arg;
    rb_eval_cmd(args[0], args[1], (int)args[2]);
    return Qnil;
}
02989 
/* Invoke every finalizer registered in `table` for the object whose id is
 * `objid`.  Each finalizer runs under rb_protect so one raising does not
 * stop the rest; errors are discarded. */
static void
run_finalizer(rb_objspace_t *objspace, VALUE objid, VALUE table)
{
    long i;
    int status;
    VALUE args[3];

    if (RARRAY_LEN(table) > 0) {
        /* the argument list [objid] is shared by every call below */
        args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
    }
    else {
        args[1] = 0;
    }

    /* default safe level; overwritten per entry with the level recorded
     * at define_finalizer time */
    args[2] = (VALUE)rb_safe_level();
    for (i=0; i<RARRAY_LEN(table); i++) {
        VALUE final = RARRAY_PTR(table)[i];  /* [safe_level, callable] */
        args[0] = RARRAY_PTR(final)[1];
        args[2] = FIX2INT(RARRAY_PTR(final)[0]);
        status = 0;
        rb_protect(run_single_final, (VALUE)args, &status);
        if (status)
            rb_set_errinfo(Qnil);  /* swallow finalizer exceptions */
    }
}
03015 
/* Finalize one zombie object: call its dfree (data destructor) and then any
 * Ruby-level finalizer procs registered for it. */
static void
run_final(rb_objspace_t *objspace, VALUE obj)
{
    VALUE objid;
    RUBY_DATA_FUNC free_func = 0;
    st_data_t key, table;

    objspace->heap.final_num--;

    objid = rb_obj_id(obj);     /* make obj into id */
    RBASIC(obj)->klass = 0;     /* hide the corpse from Ruby code */

    if (RTYPEDDATA_P(obj)) {
        free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
    }
    else {
        free_func = RDATA(obj)->dfree;
    }
    if (free_func) {
        (*free_func)(DATA_PTR(obj));
    }

    key = (st_data_t)obj;
    if (st_delete(finalizer_table, &key, &table)) {
        run_finalizer(objspace, objid, (VALUE)table);
    }
}
03043 
/* Drain deferred_final_list and finalize everything on it.  The atomic
 * exchange detaches the whole list in one step, so entries added
 * concurrently while finalize_list runs are picked up by the next loop
 * iteration. */
static void
finalize_deferred(rb_objspace_t *objspace)
{
    RVALUE *p;

    while ((p = ATOMIC_PTR_EXCHANGE(deferred_final_list, 0)) != 0) {
        finalize_list(objspace, p);
    }
}
03053 
/* Run pending finalizers unless a finalization pass is already active
 * (the `finalizing` flag doubles as a reentrancy guard). */
void
rb_gc_finalize_deferred(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (ATOMIC_EXCHANGE(finalizing, 1)) return;
    finalize_deferred(objspace);
    ATOMIC_SET(finalizing, 0);
}
03062 
/* Singly-linked snapshot of finalizer_table entries, built by
 * force_chain_object for the at-exit forced-finalization pass. */
struct force_finalize_list {
    VALUE obj;
    VALUE table;
    struct force_finalize_list *next;
};
03068 
03069 static int
03070 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
03071 {
03072     struct force_finalize_list **prev = (struct force_finalize_list **)arg;
03073     struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
03074     curr->obj = key;
03075     curr->table = val;
03076     curr->next = *prev;
03077     *prev = curr;
03078     return ST_CONTINUE;
03079 }
03080 
/* Interpreter-shutdown hook: run every remaining finalizer. */
void
rb_gc_call_finalizer_at_exit(void)
{
    rb_objspace_call_finalizer(&rb_objspace);
}
03086 
/* Shutdown finalization: flush pending finalizers, force-run every finalizer
 * still registered (repeating, since a finalizer may register new ones),
 * then sweep all heaps destroying every remaining T_DATA/T_FILE — except
 * threads, mutexes and fibers, whose destructors are unsafe this late. */
static void
rb_objspace_call_finalizer(rb_objspace_t *objspace)
{
    RVALUE *p, *pend;
    RVALUE *final_list = 0;
    size_t i;

    rest_sweep(objspace);

    /* run finalizers */
    finalize_deferred(objspace);
    assert(deferred_final_list == 0);

    if (ATOMIC_EXCHANGE(finalizing, 1)) return;  /* reentrancy guard */

    /* force to run finalizer */
    while (finalizer_table->num_entries) {
        /* snapshot the table first: run_finalizer may mutate it */
        struct force_finalize_list *list = 0;
        st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
        while (list) {
            struct force_finalize_list *curr = list;
            run_finalizer(objspace, rb_obj_id(curr->obj), curr->table);
            st_delete(finalizer_table, (st_data_t*)&curr->obj, 0);
            list = curr->next;
            xfree(curr);
        }
    }

    /* finalizers are part of garbage collection */
    during_gc++;

    /* run data object's finalizers */
    for (i = 0; i < heaps_used; i++) {
        p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
        while (p < pend) {
            if (BUILTIN_TYPE(p) == T_DATA &&
                DATA_PTR(p) && RANY(p)->as.data.dfree &&
                !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) &&
                !rb_obj_is_fiber((VALUE)p)) {
                p->as.free.flags = 0;
                if (RTYPEDDATA_P(p)) {
                    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
                }
                if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
                    /* RUBY_DEFAULT_FREE: just release the payload */
                    xfree(DATA_PTR(p));
                }
                else if (RANY(p)->as.data.dfree) {
                    /* defer: collect onto final_list, run below */
                    make_deferred(RANY(p));
                    RANY(p)->as.free.next = final_list;
                    final_list = p;
                }
            }
            else if (BUILTIN_TYPE(p) == T_FILE) {
                if (RANY(p)->as.file.fptr) {
                    make_io_deferred(RANY(p));
                    RANY(p)->as.free.next = final_list;
                    final_list = p;
                }
            }
            p++;
        }
    }
    during_gc = 0;
    if (final_list) {
        finalize_list(objspace, final_list);
    }

    st_free_table(finalizer_table);
    finalizer_table = 0;
    ATOMIC_SET(finalizing, 0);
}
03158 
/*
 * Public API: trigger a full GC cycle on the global object space, run any
 * deferred finalizers (unless a finalization pass is already in progress),
 * and release heaps that became completely empty.
 */
void
rb_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    garbage_collect(objspace);
    /* skip when shutdown finalization holds the `finalizing` flag */
    if (!finalizing) finalize_deferred(objspace);
    free_unused_heaps(objspace);
}
03167 
03168 /*
03169  *  call-seq:
03170  *     ObjectSpace._id2ref(object_id) -> an_object
03171  *
03172  *  Converts an object id to a reference to the object. May not be
03173  *  called on an object id passed as a parameter to a finalizer.
03174  *
03175  *     s = "I am a string"                    #=> "I am a string"
03176  *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
03177  *     r == s                                 #=> true
03178  *
03179  */
03180 
/*
 * Implementation of ObjectSpace._id2ref: map an object_id back to the
 * live object it denotes.  Immediates (true/false/nil/Fixnum) are encoded
 * directly; symbols are recognized by their residue mod sizeof(RVALUE)
 * (see the encoding table in rb_obj_id); anything else must be a valid,
 * non-recycled heap pointer or RangeError is raised.
 */
static VALUE
id2ref(VALUE obj, VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    rb_objspace_t *objspace = &rb_objspace;
    VALUE ptr;
    void *p0;

    rb_secure(4);
    ptr = NUM2PTR(objid);
    p0 = (void *)ptr;   /* keep the raw id for error messages */

    /* immediates encode themselves as their own id */
    if (ptr == Qtrue) return Qtrue;
    if (ptr == Qfalse) return Qfalse;
    if (ptr == Qnil) return Qnil;
    if (FIXNUM_P(ptr)) return (VALUE)ptr;
    ptr = obj_id_to_ref(objid);

    /* symbol ids satisfy id % sizeof(RVALUE) == 16 (the 4<<2 offset
     * added in rb_obj_id) */
    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
        ID symid = ptr / sizeof(RVALUE);
        if (rb_id2name(symid) == 0)
            rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
        return ID2SYM(symid);
    }

    /* must point into the heap and carry a plausible, non-internal type */
    if (!is_pointer_to_heap(objspace, (void *)ptr) ||
        BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
        rb_raise(rb_eRangeError, "%p is not id value", p0);
    }
    /* type 0 / klass 0 marks a freed or half-initialized slot */
    if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
        rb_raise(rb_eRangeError, "%p is recycled object", p0);
    }
    return (VALUE)ptr;
}
03219 
03220 /*
03221  *  Document-method: __id__
03222  *  Document-method: object_id
03223  *
03224  *  call-seq:
03225  *     obj.__id__       -> fixnum
03226  *     obj.object_id    -> fixnum
03227  *
03228  *  Returns an integer identifier for <i>obj</i>. The same number will
03229  *  be returned on all calls to <code>id</code> for a given object, and
03230  *  no two active objects will share an id.
03231  *  <code>Object#object_id</code> is a different concept from the
03232  *  <code>:name</code> notation, which returns the symbol id of
03233  *  <code>name</code>. Replaces the deprecated <code>Object#id</code>.
03234  */
03235 
03236 /*
03237  *  call-seq:
03238  *     obj.hash    -> fixnum
03239  *
03240  *  Generates a <code>Fixnum</code> hash value for this object. This
03241  *  function must have the property that <code>a.eql?(b)</code> implies
03242  *  <code>a.hash == b.hash</code>. The hash value is used by class
03243  *  <code>Hash</code>. Any hash value that exceeds the capacity of a
03244  *  <code>Fixnum</code> will be truncated before being used.
03245  */
03246 
/*
 * Compute the object_id for any VALUE; inverse of id2ref (see the
 * bit-layout table below for how each kind of value is encoded).
 */
VALUE
rb_obj_id(VALUE obj)
{
    /*
     *                32-bit VALUE space
     *          MSB ------------------------ LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol  ssssssssssssssssssssssss00001110
     *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
     *  fixnum  fffffffffffffffffffffffffffffff1
     *
     *                    object_id space
     *                                       LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol   000SSSSSSSSSSSSSSSSSSSSSSSSSSS0        S...S % A = 4 (S...S = s...s * A + 4)
     *  object   oooooooooooooooooooooooooooooo0        o...o % A = 0
     *  fixnum  fffffffffffffffffffffffffffffff1        bignum if required
     *
     *  where A = sizeof(RVALUE)/4
     *
     *  sizeof(RVALUE) is
     *  20 if 32-bit, double is 4-byte aligned
     *  24 if 32-bit, double is 8-byte aligned
     *  40 if 64-bit
     */
    /* symbols: scale by sizeof(RVALUE) and add the 16 (4<<2) offset so the
     * id can never collide with an object address; tag as Fixnum */
    if (SYMBOL_P(obj)) {
        return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
    }
    /* other immediates (nil/true/false/Fixnum) are their own id */
    if (SPECIAL_CONST_P(obj)) {
        return LONG2NUM((SIGNED_VALUE)obj);
    }
    /* heap objects: derived from the slot address (may become a Bignum) */
    return nonspecial_obj_id(obj);
}
03286 
03287 static int
03288 set_zero(st_data_t key, st_data_t val, st_data_t arg)
03289 {
03290     VALUE k = (VALUE)key;
03291     VALUE hash = (VALUE)arg;
03292     rb_hash_aset(hash, k, INT2FIX(0));
03293     return ST_CONTINUE;
03294 }
03295 
03296 /*
03297  *  call-seq:
03298  *     ObjectSpace.count_objects([result_hash]) -> hash
03299  *
03300  *  Counts objects for each type.
03301  *
03302  *  It returns a hash as:
03303  *  {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...}
03304  *
03305  *  If the optional argument, result_hash, is given,
03306  *  it is overwritten and returned.
03307  *  This is intended to avoid probe effect.
03308  *
03309  *  The contents of the returned hash is implementation defined.
03310  *  It may be changed in future.
03311  *
 *  This method is only expected to work with C Ruby.
03313  *
03314  */
03315 
03316 static VALUE
03317 count_objects(int argc, VALUE *argv, VALUE os)
03318 {
03319     rb_objspace_t *objspace = &rb_objspace;
03320     size_t counts[T_MASK+1];
03321     size_t freed = 0;
03322     size_t total = 0;
03323     size_t i;
03324     VALUE hash;
03325 
03326     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
03327         if (TYPE(hash) != T_HASH)
03328             rb_raise(rb_eTypeError, "non-hash given");
03329     }
03330 
03331     for (i = 0; i <= T_MASK; i++) {
03332         counts[i] = 0;
03333     }
03334 
03335     for (i = 0; i < heaps_used; i++) {
03336         RVALUE *p, *pend;
03337 
03338         p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
03339         for (;p < pend; p++) {
03340             if (p->as.basic.flags) {
03341                 counts[BUILTIN_TYPE(p)]++;
03342             }
03343             else {
03344                 freed++;
03345             }
03346         }
03347         total += objspace->heap.sorted[i].slot->limit;
03348     }
03349 
03350     if (hash == Qnil) {
03351         hash = rb_hash_new();
03352     }
03353     else if (!RHASH_EMPTY_P(hash)) {
03354         st_foreach(RHASH_TBL(hash), set_zero, hash);
03355     }
03356     rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
03357     rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
03358 
03359     for (i = 0; i <= T_MASK; i++) {
03360         VALUE type;
03361         switch (i) {
03362 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
03363             COUNT_TYPE(T_NONE);
03364             COUNT_TYPE(T_OBJECT);
03365             COUNT_TYPE(T_CLASS);
03366             COUNT_TYPE(T_MODULE);
03367             COUNT_TYPE(T_FLOAT);
03368             COUNT_TYPE(T_STRING);
03369             COUNT_TYPE(T_REGEXP);
03370             COUNT_TYPE(T_ARRAY);
03371             COUNT_TYPE(T_HASH);
03372             COUNT_TYPE(T_STRUCT);
03373             COUNT_TYPE(T_BIGNUM);
03374             COUNT_TYPE(T_FILE);
03375             COUNT_TYPE(T_DATA);
03376             COUNT_TYPE(T_MATCH);
03377             COUNT_TYPE(T_COMPLEX);
03378             COUNT_TYPE(T_RATIONAL);
03379             COUNT_TYPE(T_NIL);
03380             COUNT_TYPE(T_TRUE);
03381             COUNT_TYPE(T_FALSE);
03382             COUNT_TYPE(T_SYMBOL);
03383             COUNT_TYPE(T_FIXNUM);
03384             COUNT_TYPE(T_UNDEF);
03385             COUNT_TYPE(T_NODE);
03386             COUNT_TYPE(T_ICLASS);
03387             COUNT_TYPE(T_ZOMBIE);
03388 #undef COUNT_TYPE
03389           default:              type = INT2NUM(i); break;
03390         }
03391         if (counts[i])
03392             rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
03393     }
03394 
03395     return hash;
03396 }
03397 
03398 /*
03399  *  call-seq:
03400  *     GC.count -> Integer
03401  *
03402  *  The number of times GC occurred.
03403  *
03404  *  It returns the number of times GC occurred since the process started.
03405  *
03406  */
03407 
03408 static VALUE
03409 gc_count(VALUE self)
03410 {
03411     return UINT2NUM((&rb_objspace)->count);
03412 }
03413 
03414 /*
03415  *  call-seq:
03416  *     GC.stat -> Hash
03417  *
03418  *  Returns a Hash containing information about the GC.
03419  *
03420  *  The hash includes information about internal statistics about GC such as:
03421  *
03422  *    {
03423  *      :count          => 18,
03424  *      :heap_used      => 77,
03425  *      :heap_length    => 77,
03426  *      :heap_increment => 0,
03427  *      :heap_live_num  => 23287,
03428  *      :heap_free_num  => 8115,
03429  *      :heap_final_num => 0,
03430  *    }
03431  *
03432  *  The contents of the hash are implementation defined and may be changed in
03433  *  the future.
03434  *
03435  *  This method is only expected to work on C Ruby.
03436  *
03437  */
03438 
03439 static VALUE
03440 gc_stat(int argc, VALUE *argv, VALUE self)
03441 {
03442     rb_objspace_t *objspace = &rb_objspace;
03443     VALUE hash;
03444 
03445     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
03446         if (TYPE(hash) != T_HASH)
03447             rb_raise(rb_eTypeError, "non-hash given");
03448     }
03449 
03450     if (hash == Qnil) {
03451         hash = rb_hash_new();
03452     }
03453 
03454     rest_sweep(objspace);
03455 
03456     rb_hash_aset(hash, ID2SYM(rb_intern("count")), SIZET2NUM(objspace->count));
03457 
03458     /* implementation dependent counters */
03459     rb_hash_aset(hash, ID2SYM(rb_intern("heap_used")), SIZET2NUM(objspace->heap.used));
03460     rb_hash_aset(hash, ID2SYM(rb_intern("heap_length")), SIZET2NUM(objspace->heap.length));
03461     rb_hash_aset(hash, ID2SYM(rb_intern("heap_increment")), SIZET2NUM(objspace->heap.increment));
03462     rb_hash_aset(hash, ID2SYM(rb_intern("heap_live_num")), SIZET2NUM(objspace->heap.live_num));
03463     rb_hash_aset(hash, ID2SYM(rb_intern("heap_free_num")), SIZET2NUM(objspace->heap.free_num));
03464     rb_hash_aset(hash, ID2SYM(rb_intern("heap_final_num")), SIZET2NUM(objspace->heap.final_num));
03465     return hash;
03466 }
03467 
03468 
03469 #if CALC_EXACT_MALLOC_SIZE
03470 /*
03471  *  call-seq:
03472  *     GC.malloc_allocated_size -> Integer
03473  *
03474  *  The allocated size by malloc().
03475  *
03476  *  It returns the allocated size by malloc().
03477  */
03478 
03479 static VALUE
03480 gc_malloc_allocated_size(VALUE self)
03481 {
03482     return UINT2NUM((&rb_objspace)->malloc_params.allocated_size);
03483 }
03484 
03485 /*
03486  *  call-seq:
03487  *     GC.malloc_allocations -> Integer
03488  *
03489  *  The number of allocated memory object by malloc().
03490  *
03491  *  It returns the number of allocated memory object by malloc().
03492  */
03493 
03494 static VALUE
03495 gc_malloc_allocations(VALUE self)
03496 {
03497     return UINT2NUM((&rb_objspace)->malloc_params.allocations);
03498 }
03499 #endif
03500 
/*
 * Build the raw profiler data as an array of hashes, one per recorded GC
 * run.  Returns Qnil when the profiler is disabled.  With
 * GC_PROFILE_MORE_DETAIL the per-run hashes carry additional mark/sweep
 * timing and heap-usage fields.
 */
static VALUE
gc_profile_record_get(void)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = (&rb_objspace);

    if (!objspace->profile.run) {
        return Qnil;
    }

    for (i =0; i < objspace->profile.count; i++) {
        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_total_objects));
        /* is_marked is stored directly as the hash value — presumably it
         * already holds a Ruby boolean VALUE; confirm against the record
         * struct definition */
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), objspace->profile.record[i].is_marked);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(objspace->profile.record[i].allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(objspace->profile.record[i].allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), SIZET2NUM(objspace->profile.record[i].heap_use_slots));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_free_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize);
#endif
        rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}
03536 
03537 /*
03538  *  call-seq:
03539  *     GC::Profiler.result -> String
03540  *
03541  *  Returns a profile data report such as:
03542  *
03543  *    GC 1 invokes.
03544  *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC time(ms)
03545  *        1               0.012               159240               212940                10647         0.00000000000001530000
03546  */
03547 
/*
 * Implementation of GC::Profiler.result: format the recorded profile data
 * into a human-readable report string.  Returns an empty string when the
 * profiler is disabled or nothing was recorded.
 *
 * NOTE: the `if` at the top of the loop body and its closing brace are
 * each wrapped in `#if !GC_PROFILE_MORE_DETAIL` — the brace structure
 * only balances after preprocessing, so edit with care.
 */
static VALUE
gc_profile_result(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE record;
    VALUE result;
    int i, index;

    record = gc_profile_record_get();
    if (objspace->profile.run && objspace->profile.count) {
        result = rb_sprintf("GC %d invokes.\n", NUM2INT(gc_count(0)));
        index = 1;
        rb_str_cat2(result, "Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n");
        for (i = 0; i < (int)RARRAY_LEN(record); i++) {
            VALUE r = RARRAY_PTR(record)[i];
#if !GC_PROFILE_MORE_DETAIL
            /* without detail, only rows from marking GCs are shown */
            if (rb_hash_aref(r, ID2SYM(rb_intern("GC_IS_MARKED")))) {
#endif
            /* GC_TIME is stored in seconds; *1000 converts to ms for display */
            rb_str_catf(result, "%5d %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                        index++, NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_INVOKE_TIME")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SIZE")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")))),
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_TIME"))))*1000);
#if !GC_PROFILE_MORE_DETAIL
            }
#endif
        }
#if GC_PROFILE_MORE_DETAIL
        /* second table with allocation and mark/sweep timing details */
        rb_str_cat2(result, "\n\n");
        rb_str_cat2(result, "More detail.\n");
        rb_str_cat2(result, "Index Allocate Increase    Allocate Limit  Use Slot  Have Finalize             Mark Time(ms)            Sweep Time(ms)\n");
        index = 1;
        for (i = 0; i < (int)RARRAY_LEN(record); i++) {
            VALUE r = RARRAY_PTR(record)[i];
            rb_str_catf(result, "%5d %17"PRIuSIZE" %17"PRIuSIZE" %9"PRIuSIZE" %14s %25.20f %25.20f\n",
                        index++, (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_INCREASE")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_LIMIT")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SLOTS")))),
                        rb_hash_aref(r, ID2SYM(rb_intern("HAVE_FINALIZE")))? "true" : "false",
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_MARK_TIME"))))*1000,
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_SWEEP_TIME"))))*1000);
        }
#endif
    }
    else {
        result = rb_str_new2("");
    }
    return result;
}
03598 
03599 
03600 /*
03601  *  call-seq:
03602  *     GC::Profiler.report
03603  *     GC::Profiler.report io
03604  *
03605  *  Writes the GC::Profiler#result to <tt>$stdout</tt> or the given IO object.
03606  *
03607  */
03608 
03609 static VALUE
03610 gc_profile_report(int argc, VALUE *argv, VALUE self)
03611 {
03612     VALUE out;
03613 
03614     if (argc == 0) {
03615         out = rb_stdout;
03616     }
03617     else {
03618         rb_scan_args(argc, argv, "01", &out);
03619     }
03620     rb_io_write(out, gc_profile_result());
03621 
03622     return Qnil;
03623 }
03624 
03625 /*
03626  *  call-seq:
03627  *     GC::Profiler.total_time -> float
03628  *
 *  The total time used for garbage collection in seconds
03630  */
03631 
03632 static VALUE
03633 gc_profile_total_time(VALUE self)
03634 {
03635     double time = 0;
03636     rb_objspace_t *objspace = &rb_objspace;
03637     size_t i;
03638 
03639     if (objspace->profile.run && objspace->profile.count) {
03640         for (i = 0; i < objspace->profile.count; i++) {
03641             time += objspace->profile.record[i].gc_time;
03642         }
03643     }
03644     return DBL2NUM(time);
03645 }
03646 
03647 /*  Document-class: GC::Profiler
03648  *
03649  *  The GC profiler provides access to information on GC runs including time,
03650  *  length and object space size.
03651  *
03652  *  Example:
03653  *
03654  *    GC::Profiler.enable
03655  *
03656  *    require 'rdoc/rdoc'
03657  *
03658  *    puts GC::Profiler.result
03659  *
03660  *    GC::Profiler.disable
03661  *
03662  *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
03663  */
03664 
03665 /*
03666  *  The <code>GC</code> module provides an interface to Ruby's mark and
03667  *  sweep garbage collection mechanism. Some of the underlying methods
03668  *  are also available via the ObjectSpace module.
03669  *
03670  *  You may obtain information about the operation of the GC through
03671  *  GC::Profiler.
03672  */
03673 
/*
 * Interpreter bootstrap: define the GC module, GC::Profiler, and the
 * ObjectSpace module, bind their singleton/module methods to the C
 * implementations in this file, and pre-allocate the frozen NoMemoryError
 * instance so it is available even when allocation is failing.
 */
void
Init_GC(void)
{
    VALUE rb_mObSpace;
    VALUE rb_mProfiler;

    /* GC module and its singleton methods */
    rb_mGC = rb_define_module("GC");
    rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
    rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
    rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
    rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
    rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
    rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
    rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
    rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);

    /* GC::Profiler */
    rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
    rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
    rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
    rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
    rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
    rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
    rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
    rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);

    /* ObjectSpace module */
    rb_mObSpace = rb_define_module("ObjectSpace");
    rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
    rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);

    rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);

    /* pre-built, frozen NoMemoryError so raising it needs no allocation */
    nomem_error = rb_exc_new3(rb_eNoMemError,
                              rb_obj_freeze(rb_str_new2("failed to allocate memory")));
    OBJ_TAINT(nomem_error);
    OBJ_FREEZE(nomem_error);

    rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);

#if CALC_EXACT_MALLOC_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif
}
03723