
Module: GC

Relationships & Source Files
Namespace Children
Modules: Profiler
Defined in: gc.c

Overview

The GC module provides an interface to Ruby's mark and sweep garbage collection mechanism.

Some of the underlying methods are also available via the ::ObjectSpace module.

You may obtain information about the operation of the GC through GC::Profiler.

Constant Summary

Class Attribute Summary

Class Method Summary

Instance Method Summary

Class Attribute Details

.stress ⇒ Boolean (rw)

Returns current status of GC stress mode.

[ GitHub ]

  
# File 'gc.c', line 8891

static VALUE
gc_stress_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return ruby_gc_stress_mode;
}

.stress=(flag) ⇒ flag (rw)

Updates the GC stress mode.

When stress mode is enabled, the GC is invoked at every GC opportunity: all memory and object allocations.

Enabling stress mode will degrade performance; it is only for debugging.

flag can be true, false, or an integer formed by bit-ORing the following flags:

0x01:: no major GC
0x02:: no immediate sweep
0x04:: full mark after malloc/calloc/realloc
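
For example, a test helper might enable stress mode only around suspect code. This is a minimal sketch; allocate_heavily is a hypothetical placeholder for the code under investigation, and the flag combination is illustrative:

GC.stress = true          # collect at every allocation opportunity (slow; debugging only)
allocate_heavily          # hypothetical allocation-heavy code under investigation
GC.stress = 0x01 | 0x04   # stress without major GC, full mark after malloc/calloc/realloc
GC.stress = false         # restore normal collection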
[ GitHub ]

  
# File 'gc.c', line 8922

static VALUE
gc_stress_set_m(VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_stress_set(objspace, flag);
    return flag;
}

Class Method Details

.add_stress_to_class(class[, ...])

Raises NoMemoryError when allocating an instance of the given classes.
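
A hypothetical debugging session might look like the following. Note that this helper targets GC debugging and may only be available in interpreters built with the corresponding debug support:

GC.add_stress_to_class(String)      # allocating a String instance now raises NoMemoryError
begin
  String.new
rescue NoMemoryError => e
  p e                               # e.g. #<NoMemoryError: ...>
end
GC.remove_stress_to_class(String)   # String allocation behaves normally again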

[ GitHub ]

  
# File 'gc.c', line 11363

static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!stress_to_class) {
	stress_to_class = rb_ary_tmp_new(argc);
    }
    rb_ary_cat(stress_to_class, argv, argc);
    return self;
}

.compact

[ GitHub ]

  
# File 'gc.c', line 8124

static VALUE
rb_gc_compact(VALUE mod)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (dont_gc) return Qnil;
    /* Ensure objects are pinned */
    rb_gc();
    gc_compact_after_gc(objspace, FALSE, FALSE, FALSE);
    return rb_gc_compact_stats(mod);
}

.count ⇒ Integer

Returns the number of times GC has occurred since the process started.
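
For example (the numbers are illustrative and depend on how much the process has allocated so far):

GC.count   #=> 12
GC.start
GC.count   #=> 13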

[ GitHub ]

  
# File 'gc.c', line 8355

static VALUE
gc_count(VALUE self)
{
    return SIZET2NUM(rb_gc_count());
}

.disable ⇒ Boolean

Disables garbage collection, returning true if garbage collection was already disabled.

GC.disable   #=> false
GC.disable   #=> true
[ GitHub ]

  
# File 'gc.c', line 8974

VALUE
rb_gc_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_rest(objspace);
    return rb_gc_disable_no_rest();
}

.enable ⇒ Boolean

Enables garbage collection, returning true if garbage collection was previously disabled.

GC.disable   #=> false
GC.enable    #=> true
GC.enable    #=> false
[ GitHub ]

  
# File 'gc.c', line 8943

VALUE
rb_gc_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    int old = dont_gc;

    dont_gc = FALSE;
    return old ? Qtrue : Qfalse;
}

.latest_gc_info ⇒ Hash
.latest_gc_info(hash) ⇒ Hash
.latest_gc_info(:major_by) ⇒ :malloc

Returns information about the most recent garbage collection.
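
For example (the exact keys and values depend on the Ruby version and on what triggered the last collection):

GC.latest_gc_info           #=> e.g. {:major_by=>nil, :gc_by=>:newobj, :have_finalizer=>false, ...}
GC.latest_gc_info(:gc_by)   #=> e.g. :newobj
info = {}
GC.latest_gc_info(info)     # fills and returns the given hash, avoiding a new allocation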

[ GitHub ]

  
# File 'gc.c', line 8470

static VALUE
gc_latest_gc_info(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE arg = Qnil;

    if (rb_check_arity(argc, 0, 1) == 1) {
        arg = argv[0];
        if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
            rb_raise(rb_eTypeError, "non-hash or symbol given");
        }
    }
    else {
        arg = rb_hash_new();
    }

    return gc_info_decode(objspace, arg, 0);
}

.malloc_allocated_size ⇒ Integer

Returns the size of memory allocated by malloc().

Only available if ruby was built with CALC_EXACT_MALLOC_SIZE.
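
Because the method is only defined in such builds, callers can guard on its presence; a minimal sketch:

if GC.respond_to?(:malloc_allocated_size)
  GC.malloc_allocated_size   #=> e.g. 1234567
end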

[ GitHub ]

  
# File 'gc.c', line 9913

static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
}

.malloc_allocations ⇒ Integer

Returns the number of malloc() allocations.

Only available if ruby was built with CALC_EXACT_MALLOC_SIZE.

[ GitHub ]

  
# File 'gc.c', line 9928

static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocations);
}

.remove_stress_to_class(class[, ...])

No longer raises ::NoMemoryError when allocating an instance of the given classes.

[ GitHub ]

  
# File 'gc.c', line 11383

static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;

    if (stress_to_class) {
	for (i = 0; i < argc; ++i) {
	    rb_ary_delete_same(stress_to_class, argv[i]);
	}
	if (RARRAY_LEN(stress_to_class) == 0) {
	    stress_to_class = 0;
	}
    }
    return Qnil;
}

#start ⇒ nil
ObjectSpace.garbage_collect ⇒ nil
include GC; garbage_collect ⇒ nil
#start(full_mark: true, immediate_sweep: true) ⇒ nil
ObjectSpace.garbage_collect(full_mark: true, immediate_sweep: true) ⇒ nil
include GC; garbage_collect(full_mark: true, immediate_sweep: true) ⇒ nil
Also known as: #garbage_collect

Initiates garbage collection, unless manually disabled.

This method is defined with keyword arguments that default to true:

def GC.start(full_mark: true, immediate_sweep: true); end

Use full_mark: false to perform a minor GC. Use immediate_sweep: false to defer sweeping (use lazy sweep).

Note: These keyword arguments are implementation and version dependent. They are not guaranteed to be future-compatible, and may be ignored if the underlying implementation does not support them.
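
For example:

GC.start                            # full GC with immediate sweep
GC.start(full_mark: false)          # minor GC
GC.start(immediate_sweep: false)    # defer sweeping (lazy sweep)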

[ GitHub ]

  
# File 'gc.c', line 7201

static VALUE
gc_start_internal(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int reason = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
                GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_METHOD;
    VALUE opt = Qnil;
    static ID keyword_ids[3];

    rb_scan_args(argc, argv, "0:", &opt);

    if (!NIL_P(opt)) {
	VALUE kwvals[3];

	if (!keyword_ids[0]) {
	    keyword_ids[0] = rb_intern("full_mark");
	    keyword_ids[1] = rb_intern("immediate_mark");
	    keyword_ids[2] = rb_intern("immediate_sweep");
	}

	rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);

	if (kwvals[0] != Qundef && !RTEST(kwvals[0])) {
            reason &= ~GPR_FLAG_FULL_MARK;
        }
	if (kwvals[1] != Qundef && !RTEST(kwvals[1])) {
            reason &= ~GPR_FLAG_IMMEDIATE_MARK;
        }
	if (kwvals[2] != Qundef && !RTEST(kwvals[2])) {
            reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
        }
    }

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    return Qnil;
}

.stat ⇒ Hash
.stat(hash) ⇒ Hash
.stat(:key) ⇒ Numeric

Returns a ::Hash containing information about the GC.

The hash includes internal GC statistics, such as:

{
    :count=>0,
    :heap_allocated_pages=>24,
    :heap_sorted_length=>24,
    :heap_allocatable_pages=>0,
    :heap_available_slots=>9783,
    :heap_live_slots=>7713,
    :heap_free_slots=>2070,
    :heap_final_slots=>0,
    :heap_marked_slots=>0,
    :heap_eden_pages=>24,
    :heap_tomb_pages=>0,
    :total_allocated_pages=>24,
    :total_freed_pages=>0,
    :total_allocated_objects=>7796,
    :total_freed_objects=>83,
    :malloc_increase_bytes=>2389312,
    :malloc_increase_bytes_limit=>16777216,
    :minor_gc_count=>0,
    :major_gc_count=>0,
    :remembered_wb_unprotected_objects=>0,
    :remembered_wb_unprotected_objects_limit=>0,
    :old_objects=>0,
    :old_objects_limit=>0,
    :oldmalloc_increase_bytes=>2389760,
    :oldmalloc_increase_bytes_limit=>16777216
}

The contents of the hash are implementation specific and may be changed in the future.

This method is only expected to work on C Ruby.
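
For example, a single counter can be read without building the full hash, or an existing hash can be reused (values are illustrative):

GC.stat(:total_allocated_objects)   #=> 7796
stats = {}
GC.stat(stats)                      # fills and returns the given hash
stats[:minor_gc_count]              #=> 0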

[ GitHub ]

  
# File 'gc.c', line 8849

static VALUE
gc_stat(int argc, VALUE *argv, VALUE self)
{
    VALUE arg = Qnil;

    if (rb_check_arity(argc, 0, 1) == 1) {
        arg = argv[0];
	if (SYMBOL_P(arg)) {
	    size_t value = gc_stat_internal(arg);
	    return SIZET2NUM(value);
	}
	else if (!RB_TYPE_P(arg, T_HASH)) {
	    rb_raise(rb_eTypeError, "non-hash or symbol given");
	}
    }
    else {
        arg = rb_hash_new();
    }
    gc_stat_internal(arg);
    return arg;
}

.verify_compaction_references ⇒ nil

Verify compaction reference consistency.

This method is implementation specific. During compaction, objects that were moved are replaced with T_MOVED objects. No object should have a reference to a T_MOVED object after compaction.

This function doubles the heap to ensure room to move all objects, compacts the heap to make sure everything moves, updates all references, then performs a full GC. If any object still contains a reference to a T_MOVED object, that object is pushed on the mark stack and will cause a SEGV.
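
The source below accepts two implementation-specific keywords, toward: and double_heap:, so a typical debugging call might look like:

GC.verify_compaction_references(toward: :empty, double_heap: true)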

[ GitHub ]

  
# File 'gc.c', line 8259

static VALUE
gc_verify_compaction_references(int argc, VALUE *argv, VALUE mod)
{
    rb_objspace_t *objspace = &rb_objspace;
    int use_toward_empty = FALSE;
    int use_double_pages = FALSE;

    if (dont_gc) return Qnil;

    VALUE opt = Qnil;
    static ID keyword_ids[2];
    VALUE kwvals[2];

    kwvals[1] = Qtrue;

    rb_scan_args(argc, argv, "0:", &opt);

    if (!NIL_P(opt)) {
        if (!keyword_ids[0]) {
            keyword_ids[0] = rb_intern("toward");
            keyword_ids[1] = rb_intern("double_heap");
        }

        rb_get_kwargs(opt, keyword_ids, 0, 2, kwvals);
        if (rb_intern("empty") == rb_sym2id(kwvals[0])) {
            use_toward_empty = TRUE;
        }
        if (kwvals[1] != Qundef && RTEST(kwvals[1])) {
            use_double_pages = TRUE;
        }
    }

    /* Ensure objects are pinned */
    rb_gc();
    if (mjit_enabled) mjit_pause(false); // debugging. suspecting that JIT is triggered for a broken ISeq during `gc_compact_after_gc`.
    gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, TRUE);
    if (mjit_enabled) mjit_resume(); // debugging
    return rb_gc_compact_stats(mod);
}

.verify_internal_consistency ⇒ nil

Verify internal consistency.

This method is implementation specific. It currently checks generational consistency when RGenGC is supported.
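
For example, as a sanity check in GC-related tests:

GC.verify_internal_consistency   #=> nil when the heap is consistent; otherwise the process aborts via rb_bug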

[ GitHub ]

  
# File 'gc.c', line 5784

static VALUE
gc_verify_internal_consistency(VALUE dummy)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct verify_internal_consistency_struct data = {0};
    struct each_obj_args eo_args;

    data.objspace = objspace;
    gc_report(5, objspace, "gc_verify_internal_consistency: start\n");

    /* check relations */

    eo_args.callback = verify_internal_consistency_i;
    eo_args.data = (void *)&data;
    objspace_each_objects((VALUE)&eo_args);

    if (data.err_count != 0) {
#if RGENGC_CHECK_MODE >= 5
	objspace->rgengc.error_count = data.err_count;
	gc_marks_check(objspace, NULL, NULL);
	allrefs_dump(objspace);
#endif
	rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
    }

    /* check heap_page status */
    gc_verify_heap_pages(objspace);

    /* check counters */

    if (!is_lazy_sweeping(heap_eden) && !finalizing) {
	if (objspace_live_slots(objspace) != data.live_object_count) {
	    fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
		    (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects);
	    rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace_live_slots(objspace), data.live_object_count);
	}
    }

#if USE_RGENGC
    if (!is_marking(objspace)) {
	if (objspace->rgengc.old_objects != data.old_object_count) {
	    rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.old_objects, data.old_object_count);
	}
	if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
	    rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
	}
    }
#endif

    if (!finalizing) {
	size_t list_count = 0;

	{
	    VALUE z = heap_pages_deferred_final;
	    while (z) {
		list_count++;
		z = RZOMBIE(z)->next;
	    }
	}

	if (heap_pages_final_slots != data.zombie_object_count ||
	    heap_pages_final_slots != list_count) {

	    rb_bug("inconsistent finalizing object count:\n"
		   "  expect %"PRIuSIZE"\n"
		   "  but    %"PRIuSIZE" zombies\n"
		   "  heap_pages_deferred_final list has %"PRIuSIZE" items.",
		   heap_pages_final_slots,
		   data.zombie_object_count,
		   list_count);
	}
    }

    gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");

    return Qnil;
}

.verify_transient_heap_internal_consistency

[ GitHub ]

  
# File 'gc.c', line 5868

static VALUE
gc_verify_transient_heap_internal_consistency(VALUE dmy)
{
    rb_transient_heap_verify();
    return Qnil;
}

Instance Method Details

#start ⇒ nil
ObjectSpace.garbage_collect ⇒ nil
include GC; garbage_collect ⇒ nil
#start(full_mark: true, immediate_sweep: true) ⇒ nil
ObjectSpace.garbage_collect(full_mark: true, immediate_sweep: true) ⇒ nil
include GC; garbage_collect(full_mark: true, immediate_sweep: true) ⇒ nil

Alias for .start.