
Module: ObjectSpace

Overview

The objspace library extends the ObjectSpace module and adds several methods to get internal statistical information about object/memory management.

You need to require 'objspace' to use this extension module.

Generally, you *SHOULD NOT* use this library unless you are familiar with the MRI implementation. It is mainly intended for (memory) profiler developers and MRI developers who need to know about MRI memory usage.
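
For example (a minimal sketch; the returned values are implementation dependent and vary between processes and Ruby versions):

require 'objspace'

ObjectSpace.memsize_of("some string")  # approximate size of a single object, in bytes
ObjectSpace.count_objects_size         # per-type object sizes for the whole heap
ObjectSpace.count_tdata_objects        # T_DATA objects grouped by class or type name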

Class Method Details

._dump(obj, output) (mod_func)

This method is for internal use only.
[ GitHub ]

  
# File 'ext/objspace/objspace_dump.c', line 700

static VALUE
objspace_dump(VALUE os, VALUE obj, VALUE output)
{
    struct dump_config dc = {0,};
    if (!RB_SPECIAL_CONST_P(obj)) {
        dc.cur_page_slot_size = rb_gc_obj_slot_size(obj);
    }

    dump_output(&dc, output, Qnil, Qnil, Qnil);

    dump_object(obj, &dc);

    return dump_result(&dc);
}

._dump_all(output, full, since, shapes) (mod_func)

This method is for internal use only.
[ GitHub ]

  
# File 'ext/objspace/objspace_dump.c', line 784

static VALUE
objspace_dump_all(VALUE os, VALUE output, VALUE full, VALUE since, VALUE shapes)
{
    struct dump_config dc = {0,};
    dump_output(&dc, output, full, since, shapes);

    if (!dc.partial_dump || dc.since == 0) {
        /* dump roots */
        rb_objspace_reachable_objects_from_root(root_obj_i, &dc);
        if (dc.roots) dump_append(&dc, "]}\n");
    }

    if (RTEST(shapes)) {
        rb_shape_each_shape(shape_i, &dc);
    }

    /* dump all objects */
    rb_objspace_each_objects(heap_i, &dc);

    return dump_result(&dc);
}

._dump_shapes(output, shapes) (mod_func)

This method is for internal use only.
[ GitHub ]

  
# File 'ext/objspace/objspace_dump.c', line 807

static VALUE
objspace_dump_shapes(VALUE os, VALUE output, VALUE shapes)
{
    struct dump_config dc = {0,};
    dump_output(&dc, output, Qfalse, Qnil, shapes);

    if (RTEST(shapes)) {
        rb_shape_each_shape(shape_i, &dc);
    }
    return dump_result(&dc);
}

.allocation_class_path(object) ⇒ String (mod_func)

Returns the class path (a String) of the class whose method allocated the given object, or nil if no allocation information was recorded. In the example below, the allocation happens inside Class#new, so the recorded class path is "Class".

class A
  def foo
    ObjectSpace::trace_object_allocations do
      obj = Object.new
      p "#{ObjectSpace::allocation_class_path(obj)}"
    end
  end
end

A.new.foo #=> "Class"

See .trace_object_allocations for more information and examples.

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 490

static VALUE
allocation_class_path(VALUE self, VALUE obj)
{
    struct allocation_info *info = lookup_allocation_info(obj);

    if (info && info->class_path) {
        return rb_str_new2(info->class_path);
    }
    else {
        return Qnil;
    }
}

.allocation_generation(object) ⇒ Integer? (mod_func)

Returns the garbage collector generation in which the given object was allocated, or nil if no allocation information was recorded.

class B
  include ObjectSpace

  def foo
    trace_object_allocations do
      obj = Object.new
      p "Generation is #{allocation_generation(obj)}"
    end
  end
end

B.new.foo #=> "Generation is 3"

See .trace_object_allocations for more information and examples.

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 555

static VALUE
allocation_generation(VALUE self, VALUE obj)
{
    struct allocation_info *info = lookup_allocation_info(obj);
    if (info) {
        return SIZET2NUM(info->generation);
    }
    else {
        return Qnil;
    }
}

.allocation_method_id(object) ⇒ String (mod_func)

Returns the identifier of the method in which the given object was allocated, or nil if no allocation information was recorded.

class A
  include ObjectSpace

  def foo
    trace_object_allocations do
      obj = Object.new
      p "#{allocation_class_path(obj)}##{allocation_method_id(obj)}"
    end
  end
end

A.new.foo #=> "Class#new"

See .trace_object_allocations for more information and examples.

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 523

static VALUE
allocation_method_id(VALUE self, VALUE obj)
{
    struct allocation_info *info = lookup_allocation_info(obj);
    if (info) {
        return info->mid;
    }
    else {
        return Qnil;
    }
}

.allocation_sourcefile(object) ⇒ String (mod_func)

Returns the name of the source file in which the given object was allocated, or nil if no allocation information was recorded.

See .trace_object_allocations for more information and examples.

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 439

static VALUE
allocation_sourcefile(VALUE self, VALUE obj)
{
    struct allocation_info *info = lookup_allocation_info(obj);

    if (info && info->path) {
        return rb_str_new2(info->path);
    }
    else {
        return Qnil;
    }
}

.allocation_sourceline(object) ⇒ Integer (mod_func)

Returns the source line number at which the given object was allocated, or nil if no allocation information was recorded.

See .trace_object_allocations for more information and examples.

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 459

static VALUE
allocation_sourceline(VALUE self, VALUE obj)
{
    struct allocation_info *info = lookup_allocation_info(obj);

    if (info) {
        return INT2FIX(info->line);
    }
    else {
        return Qnil;
    }
}

.count_imemo_objects([result_hash]) ⇒ Hash (mod_func)

Counts objects for each T_IMEMO type.

This method is only for MRI developers interested in performance and memory usage of Ruby programs.

It returns a hash as:

{:imemo_ifunc=>8,
 :imemo_svar=>7,
 :imemo_cref=>509,
 :imemo_memo=>1,
 :imemo_throw_data=>1}

If the optional argument result_hash is given, it is overwritten and returned. This is intended to avoid the probe effect.

The contents of the returned hash are implementation specific and may change in the future.

In this version, keys are Symbol objects.

This method is only expected to work with C Ruby.
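
For example, a profiler can allocate the result hash once and pass it in on every sample so that the measurement itself allocates as little as possible (a sketch; key names such as :imemo_iseq are version dependent):

require 'objspace'

stats = {}                                # allocated once, outside the measurement loop
3.times do
  ObjectSpace.count_imemo_objects(stats)  # stats is overwritten in place and returned
  p stats[:imemo_iseq]                    # live iseq (instruction sequence) imemo objects, if any
end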

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 627

static VALUE
count_imemo_objects(int argc, VALUE *argv, VALUE self)
{
    VALUE hash = setup_hash(argc, argv);

    if (imemo_type_ids[0] == 0) {
#define INIT_IMEMO_TYPE_ID(n) (imemo_type_ids[n] = rb_intern_const(#n))
        INIT_IMEMO_TYPE_ID(imemo_env);
        INIT_IMEMO_TYPE_ID(imemo_cref);
        INIT_IMEMO_TYPE_ID(imemo_svar);
        INIT_IMEMO_TYPE_ID(imemo_throw_data);
        INIT_IMEMO_TYPE_ID(imemo_ifunc);
        INIT_IMEMO_TYPE_ID(imemo_memo);
        INIT_IMEMO_TYPE_ID(imemo_ment);
        INIT_IMEMO_TYPE_ID(imemo_iseq);
        INIT_IMEMO_TYPE_ID(imemo_tmpbuf);
        INIT_IMEMO_TYPE_ID(imemo_ast);
        INIT_IMEMO_TYPE_ID(imemo_parser_strterm);
        INIT_IMEMO_TYPE_ID(imemo_callinfo);
        INIT_IMEMO_TYPE_ID(imemo_callcache);
        INIT_IMEMO_TYPE_ID(imemo_constcache);
#undef INIT_IMEMO_TYPE_ID
    }

    each_object_with_flags(count_imemo_objects_i, (void *)hash);

    return hash;
}

.count_nodes([result_hash]) ⇒ Hash (mod_func)

Counts nodes for each node type.

This method is only for MRI developers interested in performance and memory usage of Ruby programs.

It returns a hash as:

{:NODE_METHOD=>2027, :NODE_FBODY=>1927, :NODE_CFUNC=>1798, ...}

If the optional argument result_hash is given, it is overwritten and returned. This is intended to avoid the probe effect.

Note: The contents of the returned hash are implementation defined and may change in the future.

This method is only expected to work with C Ruby.
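
A minimal usage sketch (which node types appear, if any, depends on the Ruby version):

require 'objspace'

nodes = ObjectSpace.count_nodes
# List the (up to) three most common node types currently alive in this process.
p nodes.sort_by { |_type, count| -count }.first(3)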

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 374

static VALUE
count_nodes(int argc, VALUE *argv, VALUE os)
{
    size_t nodes[NODE_LAST+1];
    enum node_type i;
    VALUE hash = setup_hash(argc, argv);

    for (i = 0; i <= NODE_LAST; i++) {
        nodes[i] = 0;
    }

    each_object_with_flags(cn_i, &nodes[0]);

    for (i=0; i<NODE_LAST; i++) {
        if (nodes[i] != 0) {
            VALUE node;
            switch (i) {
#define COUNT_NODE(n) case n: node = ID2SYM(rb_intern(#n)); goto set
                COUNT_NODE(NODE_SCOPE);
                COUNT_NODE(NODE_BLOCK);
                COUNT_NODE(NODE_IF);
                COUNT_NODE(NODE_UNLESS);
                COUNT_NODE(NODE_CASE);
                COUNT_NODE(NODE_CASE2);
                COUNT_NODE(NODE_CASE3);
                COUNT_NODE(NODE_WHEN);
                COUNT_NODE(NODE_IN);
                COUNT_NODE(NODE_WHILE);
                COUNT_NODE(NODE_UNTIL);
                COUNT_NODE(NODE_ITER);
                COUNT_NODE(NODE_FOR);
                COUNT_NODE(NODE_FOR_MASGN);
                COUNT_NODE(NODE_BREAK);
                COUNT_NODE(NODE_NEXT);
                COUNT_NODE(NODE_REDO);
                COUNT_NODE(NODE_RETRY);
                COUNT_NODE(NODE_BEGIN);
                COUNT_NODE(NODE_RESCUE);
                COUNT_NODE(NODE_RESBODY);
                COUNT_NODE(NODE_ENSURE);
                COUNT_NODE(NODE_AND);
                COUNT_NODE(NODE_OR);
                COUNT_NODE(NODE_MASGN);
                COUNT_NODE(NODE_LASGN);
                COUNT_NODE(NODE_DASGN);
                COUNT_NODE(NODE_GASGN);
                COUNT_NODE(NODE_IASGN);
                COUNT_NODE(NODE_CDECL);
                COUNT_NODE(NODE_CVASGN);
                COUNT_NODE(NODE_OP_ASGN1);
                COUNT_NODE(NODE_OP_ASGN2);
                COUNT_NODE(NODE_OP_ASGN_AND);
                COUNT_NODE(NODE_OP_ASGN_OR);
                COUNT_NODE(NODE_OP_CDECL);
                COUNT_NODE(NODE_CALL);
                COUNT_NODE(NODE_OPCALL);
                COUNT_NODE(NODE_FCALL);
                COUNT_NODE(NODE_VCALL);
                COUNT_NODE(NODE_QCALL);
                COUNT_NODE(NODE_SUPER);
                COUNT_NODE(NODE_ZSUPER);
                COUNT_NODE(NODE_LIST);
                COUNT_NODE(NODE_ZLIST);
                COUNT_NODE(NODE_VALUES);
                COUNT_NODE(NODE_HASH);
                COUNT_NODE(NODE_RETURN);
                COUNT_NODE(NODE_YIELD);
                COUNT_NODE(NODE_LVAR);
                COUNT_NODE(NODE_DVAR);
                COUNT_NODE(NODE_GVAR);
                COUNT_NODE(NODE_IVAR);
                COUNT_NODE(NODE_CONST);
                COUNT_NODE(NODE_CVAR);
                COUNT_NODE(NODE_NTH_REF);
                COUNT_NODE(NODE_BACK_REF);
                COUNT_NODE(NODE_MATCH);
                COUNT_NODE(NODE_MATCH2);
                COUNT_NODE(NODE_MATCH3);
                COUNT_NODE(NODE_LIT);
                COUNT_NODE(NODE_STR);
                COUNT_NODE(NODE_DSTR);
                COUNT_NODE(NODE_XSTR);
                COUNT_NODE(NODE_DXSTR);
                COUNT_NODE(NODE_EVSTR);
                COUNT_NODE(NODE_DREGX);
                COUNT_NODE(NODE_ONCE);
                COUNT_NODE(NODE_ARGS);
                COUNT_NODE(NODE_ARGS_AUX);
                COUNT_NODE(NODE_OPT_ARG);
                COUNT_NODE(NODE_KW_ARG);
                COUNT_NODE(NODE_POSTARG);
                COUNT_NODE(NODE_ARGSCAT);
                COUNT_NODE(NODE_ARGSPUSH);
                COUNT_NODE(NODE_SPLAT);
                COUNT_NODE(NODE_BLOCK_PASS);
                COUNT_NODE(NODE_DEFN);
                COUNT_NODE(NODE_DEFS);
                COUNT_NODE(NODE_ALIAS);
                COUNT_NODE(NODE_VALIAS);
                COUNT_NODE(NODE_UNDEF);
                COUNT_NODE(NODE_CLASS);
                COUNT_NODE(NODE_MODULE);
                COUNT_NODE(NODE_SCLASS);
                COUNT_NODE(NODE_COLON2);
                COUNT_NODE(NODE_COLON3);
                COUNT_NODE(NODE_DOT2);
                COUNT_NODE(NODE_DOT3);
                COUNT_NODE(NODE_FLIP2);
                COUNT_NODE(NODE_FLIP3);
                COUNT_NODE(NODE_SELF);
                COUNT_NODE(NODE_NIL);
                COUNT_NODE(NODE_TRUE);
                COUNT_NODE(NODE_FALSE);
                COUNT_NODE(NODE_ERRINFO);
                COUNT_NODE(NODE_DEFINED);
                COUNT_NODE(NODE_POSTEXE);
                COUNT_NODE(NODE_DSYM);
                COUNT_NODE(NODE_ATTRASGN);
                COUNT_NODE(NODE_LAMBDA);
                COUNT_NODE(NODE_ARYPTN);
                COUNT_NODE(NODE_FNDPTN);
                COUNT_NODE(NODE_HSHPTN);
                COUNT_NODE(NODE_ERROR);
#undef COUNT_NODE
              case NODE_LAST: break;
            }
            UNREACHABLE;
          set:
            rb_hash_aset(hash, node, SIZET2NUM(nodes[i]));
        }
    }
    return hash;
}

.count_objects_size([result_hash]) ⇒ Hash (mod_func)

Counts the total size of objects (in bytes) for each object type.

Note that this information is incomplete. Treat it only as a HINT. In particular, the total size of T_DATA objects may be wrong.

It returns a hash as:

{:TOTAL=>1461154, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249, ...}

If the optional argument result_hash is given, it is overwritten and returned. This is intended to avoid the probe effect.

The contents of the returned hash are implementation defined and may change in the future.

This method is only expected to work with C Ruby.
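
For example, the per-type sizes can be sampled periodically while reusing a single result hash (a sketch; remember the values are only hints):

require 'objspace'

sizes = {}
5.times do
  ObjectSpace.count_objects_size(sizes)
  puts "strings: #{sizes[:T_STRING]} bytes, total: #{sizes[:TOTAL]} bytes"
  sleep 1
end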

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 249

static VALUE
count_objects_size(int argc, VALUE *argv, VALUE os)
{
    size_t counts[T_MASK+1];
    size_t total = 0;
    enum ruby_value_type i;
    VALUE hash = setup_hash(argc, argv);

    for (i = 0; i <= T_MASK; i++) {
        counts[i] = 0;
    }

    each_object_with_flags(cos_i, &counts[0]);

    for (i = 0; i <= T_MASK; i++) {
        if (counts[i]) {
            VALUE type = type2sym(i);
            total += counts[i];
            rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
        }
    }
    rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
    return hash;
}

.count_symbols([result_hash]) ⇒ Hash (mod_func)

Counts symbols for each Symbol type.

This method is only for MRI developers interested in performance and memory usage of Ruby programs.

If the optional argument result_hash is given, it is overwritten and returned. This is intended to avoid the probe effect.

Note: The contents of the returned hash are implementation defined and may change in the future.

This method is only expected to work with C Ruby.

This version of MRI has 3 types of Symbols (plus 1 total count), as shown in the usage sketch after this list:

* mortal_dynamic_symbol: dynamic symbols that are GC targets (collected by GC)
* immortal_dynamic_symbol: immortal symbols promoted from dynamic symbols (not collected by GC)
* immortal_static_symbol: immortal static symbols (not collected by GC)
* immortal_symbol: total immortal symbols (immortal_dynamic_symbol + immortal_static_symbol)
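
A usage sketch (counts are process and version dependent):

require 'objspace'

counts = ObjectSpace.count_symbols
counts[:mortal_dynamic_symbol]    # dynamic symbols that can still be collected
counts[:immortal_static_symbol]   # e.g. symbols created by the parser or by rb_intern
counts[:immortal_symbol]          # immortal_dynamic_symbol + immortal_static_symbol
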
[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 323

static VALUE
count_symbols(int argc, VALUE *argv, VALUE os)
{
    struct dynamic_symbol_counts dynamic_counts = {0, 0};
    VALUE hash = setup_hash(argc, argv);

    size_t immortal_symbols = rb_sym_immortal_count();
    each_object_with_flags(cs_i, &dynamic_counts);

    rb_hash_aset(hash, ID2SYM(rb_intern("mortal_dynamic_symbol")),   SIZET2NUM(dynamic_counts.mortal));
    rb_hash_aset(hash, ID2SYM(rb_intern("immortal_dynamic_symbol")), SIZET2NUM(dynamic_counts.immortal));
    rb_hash_aset(hash, ID2SYM(rb_intern("immortal_static_symbol")),  SIZET2NUM(immortal_symbols - dynamic_counts.immortal));
    rb_hash_aset(hash, ID2SYM(rb_intern("immortal_symbol")),         SIZET2NUM(immortal_symbols));

    return hash;
}

.count_tdata_objects([result_hash]) ⇒ Hash (mod_func)

Counts objects for each T_DATA type.

This method is only for MRI developers interested in performance and memory usage of Ruby programs.

It returns a hash as:

{RubyVM::InstructionSequence=>504, :parser=>5, :barrier=>6,
 :mutex=>6, Proc=>60, RubyVM::Env=>57, Mutex=>1, Encoding=>99,
 ThreadGroup=>1, Binding=>1, Thread=>1, RubyVM=>1, :iseq=>1,
 Random=>1, ARGF.class=>1, Data=>1, :autoload=>3, Time=>2}
# T_DATA objects existing at startup on r32276.

If the optional argument result_hash is given, it is overwritten and returned. This is intended to avoid the probe effect.

The contents of the returned hash are implementation specific and may change in the future.

In this version, keys are Class objects or Symbol objects.

If the object is a normal (accessible) object, the key is its Class object. If the object is an internal object, the key is the symbol name registered via rb_data_type_struct.

This method is only expected to work with C Ruby.
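
For example, the Class/Symbol key distinction described above can be used to separate user-visible wrapper classes from purely internal T_DATA types (a sketch; the exact keys depend on the Ruby version and the running program):

require 'objspace'

tdata = ObjectSpace.count_tdata_objects
internal = tdata.select { |key, _count| key.is_a?(Symbol) }  # e.g. :parser, :mutex
wrapped  = tdata.select { |key, _count| key.is_a?(Class) }   # e.g. Proc, Binding
p internal.length, wrapped.length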

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 567

static VALUE
count_tdata_objects(int argc, VALUE *argv, VALUE self)
{
    VALUE hash = setup_hash(argc, argv);
    each_object_with_flags(cto_i, (void *)hash);
    return hash;
}

.dump(obj[, output: :string]) ⇒ "{ ... }" (mod_func)
.dump(obj, output: :file) ⇒ 1
.dump(obj, output: :stdout) ⇒ nil

Dumps the contents of a Ruby object as JSON.

This method is only expected to work with C Ruby. This is an experimental method and is subject to change. In particular, the function signature and output format are not guaranteed to be compatible in future versions of Ruby.
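
A short usage sketch of the output modes (the JSON contents vary by object and Ruby version):

require 'objspace'

obj = "foo" * 10
json = ObjectSpace.dump(obj)            # :string (the default) returns the JSON text
ObjectSpace.dump(obj, output: :stdout)  # prints the JSON and returns nil
ObjectSpace.dump(obj, output: $stderr)  # an already-open IO can also be passed directly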

[ GitHub ]

  
# File 'ext/objspace/lib/objspace.rb', line 25

def dump(obj, output: :string)
  out = case output
  when :file, nil
    require 'tempfile'
    Tempfile.create(%w(rubyobj .json))
  when :stdout
    STDOUT
  when :string
    +''
  when IO
    output
  else
    raise ArgumentError, "wrong output option: #{output.inspect}"
  end

  ret = _dump(obj, out)
  return nil if output == :stdout
  ret
end

.dump_all([output: :file]) ⇒ 1 (mod_func)
.dump_all(output: :stdout) ⇒ nil
.dump_all(output: :string) ⇒ "{...}\n{...}\n..."
.dump_all(output: File.open('heap.json','w')) ⇒ #<File:heap.json
.dump_all(output: :string, since: 42) ⇒ "{...}\n{...}\n..."

Dumps the contents of the Ruby heap as JSON.

full must be a boolean. If true, all heap slots are dumped, including empty ones (T_NONE).

since must be a non-negative integer or nil.

If since is a positive integer, only objects of that generation and newer generations are dumped. The current generation can be accessed using GC.count. Objects that were allocated without object allocation tracing enabled are ignored. See .trace_object_allocations for more information and examples.

If since is omitted or is nil, all objects are dumped.

shapes must be a boolean or a non-negative integer.

If shapes is a positive integer, only shapes newer than the provided shape id are dumped. The current shape_id can be accessed using RubyVM.stat(:next_shape_id).

If shapes is false, no shapes are dumped.

To only dump objects allocated past a certain point you can combine since and shapes:

ObjectSpace.trace_object_allocations
GC.start
gc_generation = GC.count
shape_generation = RubyVM.stat(:next_shape_id)
call_method_to_instrument
ObjectSpace.dump_all(since: gc_generation, shapes: shape_generation)

This method is only expected to work with C Ruby. This is an experimental method and is subject to change. In particular, the function signature and output format are not guaranteed to be compatible in future versions of Ruby.

[ GitHub ]

  
# File 'ext/objspace/lib/objspace.rb', line 86

def dump_all(output: :file, full: false, since: nil, shapes: true)
  out = case output
  when :file, nil
    require 'tempfile'
    Tempfile.create(%w(rubyheap .json))
  when :stdout
    STDOUT
  when :string
    +''
  when IO
    output
  else
    raise ArgumentError, "wrong output option: #{output.inspect}"
  end

  shapes = 0 if shapes == true
  ret = _dump_all(out, full, since, shapes)
  return nil if output == :stdout
  ret
end

.dump_shapes([output: :file]) ⇒ 1 (mod_func)
.dump_shapes(output: :stdout) ⇒ nil
.dump_shapes(output: :string) ⇒ "{...}\n{...}\n..."
.dump_shapes(output: File.open('shapes.json','w')) ⇒ #<File:shapes.json
.dump_shapes(output: :string, since: 42) ⇒ "{...}\n{...}\n..."

Dumps the contents of the Ruby shape tree as JSON.

If since is a positive integer, only shapes newer than the provided shape id are dumped. The current shape_id can be accessed using RubyVM.stat(:next_shape_id).

This method is only expected to work with C Ruby. This is an experimental method and is subject to change. In particular, the function signature and output format are not guaranteed to be compatible in future versions of Ruby.
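
A sketch of dumping only the shapes created after a given point (assumes a Ruby version with object shapes, so that RubyVM.stat(:next_shape_id) is available):

require 'objspace'

baseline = RubyVM.stat(:next_shape_id)
obj = Object.new
obj.instance_variable_set(:@freshly_added_ivar, 1)  # likely creates a new shape
ObjectSpace.dump_shapes(output: :stdout, since: baseline)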

[ GitHub ]

  
# File 'ext/objspace/lib/objspace.rb', line 123

def dump_shapes(output: :file, since: 0)
  out = case output
  when :file, nil
    require 'tempfile'
    Tempfile.create(%w(rubyshapes .json))
  when :stdout
    STDOUT
  when :string
    +''
  when IO
    output
  else
    raise ArgumentError, "wrong output option: #{output.inspect}"
  end

  ret = _dump_shapes(out, since)
  return nil if output == :stdout
  ret
end

.internal_class_of(obj) ⇒ Class, Module (mod_func)

MRI specific feature

Returns the internal class of obj.

obj can be an instance of ::ObjectSpace::InternalObjectWrapper.

Note that you should not use this method in your application.

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 896

static VALUE
objspace_internal_class_of(VALUE self, VALUE obj)
{
    VALUE klass;

    if (rb_typeddata_is_kind_of(obj, &iow_data_type)) {
        obj = (VALUE)DATA_PTR(obj);
    }

    if (RB_TYPE_P(obj, T_IMEMO)) {
        return Qnil;
    }
    else {
        klass = CLASS_OF(obj);
        return wrap_klass_iow(klass);
    }
}

.internal_super_of(cls) ⇒ Class, Module (mod_func)

MRI specific feature

Returns the internal superclass of cls (a Class or Module).

cls can be an instance of ::ObjectSpace::InternalObjectWrapper.

Note that you should not use this method in your application.
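
For example, because module inclusion inserts hidden include classes (T_ICLASS) into the ancestry chain, walking the chain with this method can reveal them (a sketch; the exact wrapper behaviour depends on the MRI version):

require 'objspace'

module M; end
class C
  include M
end

# The internal superclass of C is the hidden include class for M,
# returned as an ObjectSpace::InternalObjectWrapper.
wrapper = ObjectSpace.internal_super_of(C)
p wrapper.type  # expected to be :T_ICLASS on current MRI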

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 923

static VALUE
objspace_internal_super_of(VALUE self, VALUE obj)
{
    VALUE super;

    if (rb_typeddata_is_kind_of(obj, &iow_data_type)) {
        obj = (VALUE)DATA_PTR(obj);
    }

    switch (OBJ_BUILTIN_TYPE(obj)) {
      case T_MODULE:
      case T_CLASS:
      case T_ICLASS:
        super = RCLASS_SUPER(obj);
        break;
      default:
        rb_raise(rb_eArgError, "class or module is expected");
    }

    return wrap_klass_iow(super);
}

.memsize_of(obj) ⇒ Integer (mod_func)

Returns the memory size consumed by obj, in bytes.

Note that the returned size is incomplete. Treat it only as a HINT. In particular, the size of T_DATA objects may not be correct.

This method is only expected to work with C Ruby.

Since Ruby 2.2, memsize_of(obj) returns a memory size that includes sizeof(RVALUE).
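
A small sketch (the exact numbers depend on the Ruby version, build options, and platform):

require 'objspace'

ObjectSpace.memsize_of("")            # a short, embedded string: roughly one heap slot
ObjectSpace.memsize_of("x" * 10_000)  # a long string: slot plus its malloc'ed buffer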

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 48

static VALUE
memsize_of_m(VALUE self, VALUE obj)
{
    return SIZET2NUM(rb_obj_memsize_of(obj));
}

.memsize_of_all([klass]) ⇒ Integer (mod_func)

Returns the memory size consumed by all living objects, in bytes.

If klass (a Class object) is given, returns the total memory size of instances of the given class.

Note that the returned size is incomplete. Treat it only as a HINT. In particular, the size of T_DATA objects may not be correct.

Note that this method does NOT return the total malloc'ed memory size.

This method can be defined by the following Ruby code:

def memsize_of_all klass = false
  total = 0
  ObjectSpace.each_object{|e|
    total += ObjectSpace.memsize_of(e) if klass == false || e.kind_of?(klass)
  }
  total
end

This method is only expected to work with C Ruby.

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 137

static VALUE
memsize_of_all_m(int argc, VALUE *argv, VALUE self)
{
    struct total_data data = {0, 0};

    if (argc > 0) {
        rb_scan_args(argc, argv, "01", &data.klass);
    }

    each_object_with_flags(total_i, &data);
    return SIZET2NUM(data.total);
}

.reachable_objects_from(obj) ⇒ Array? (mod_func)

MRI specific feature

Returns all objects reachable from obj.

If obj holds two or more references to the same object x, the returned array includes only one x.

If obj is a non-markable (non-heap-managed) object such as true, false, nil, a Symbol, or an immediate number (Fixnum or Flonum), this method simply returns nil.

If obj has references to internal objects, they are returned as instances of the ::ObjectSpace::InternalObjectWrapper class. Such a wrapper holds a reference to the internal object, and you can check the internal object's type with its 'type' method.

If obj itself is an instance of ::ObjectSpace::InternalObjectWrapper, this method returns all objects reachable from the internal object that obj points to.

With this method, you can find memory leaks.

This method is only expected to work with C Ruby.

Example:

ObjectSpace.reachable_objects_from(['a', 'b', 'c'])
#=> [Array, 'a', 'b', 'c']

ObjectSpace.reachable_objects_from(['a', 'a', 'a'])
#=> [Array, 'a', 'a', 'a'] # all three 'a' strings are different objects

ObjectSpace.reachable_objects_from([v = 'a', v, v])
#=> [Array, 'a']

ObjectSpace.reachable_objects_from(1)
#=> nil # 1 is not a markable (heap-managed) object
[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 784

static VALUE
reachable_objects_from(VALUE self, VALUE obj)
{
    if (rb_objspace_markable_object_p(obj)) {
        struct rof_data data;

        if (rb_typeddata_is_kind_of(obj, &iow_data_type)) {
            obj = (VALUE)DATA_PTR(obj);
        }

        data.refs = rb_obj_hide(rb_ident_hash_new());
        data.values = rb_ary_new();

        rb_objspace_reachable_objects_from(obj, reachable_object_from_i, &data);

        return data.values;
    }
    else {
        return Qnil;
    }
}

.reachable_objects_from_root ⇒ Hash (mod_func)

MRI specific feature

Returns all objects reachable from the root, as a Hash keyed by root category name.
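
A usage sketch (category names vary across MRI versions):

require 'objspace'

roots = ObjectSpace.reachable_objects_from_root
p roots.keys.first(5)  # e.g. ["vm", "machine_context", "symbols", ...]
p roots["vm"]&.size    # how many objects hang off that category, if it exists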

[ GitHub ]

  
# File 'ext/objspace/objspace.c', line 859

static VALUE
reachable_objects_from_root(VALUE self)
{
    struct rofr_data data;
    VALUE hash = data.categories = rb_ident_hash_new();
    data.last_category = 0;

    rb_objspace_reachable_objects_from_root(reachable_object_from_root_i, &data);
    rb_hash_foreach(hash, collect_values_of_values, hash);

    return hash;
}

.trace_object_allocations (mod_func)

Starts tracing object allocations from the ObjectSpace extension module. When a block is given, tracing is enabled only for the duration of the block.

For example:

require 'objspace'

class C
  include ObjectSpace

  def foo
    trace_object_allocations do
      obj = Object.new
      p "#{allocation_sourcefile(obj)}:#{allocation_sourceline(obj)}"
    end
  end
end

C.new.foo #=> "objtrace.rb:8"

The example above includes the ObjectSpace module to make it easier to read, but you can also use the fully qualified ObjectSpace.trace_object_allocations notation (recommended).

Note that this feature causes a significant slowdown and greatly increases memory consumption.

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 362

static VALUE
trace_object_allocations(VALUE self)
{
    trace_object_allocations_start(self);
    return rb_ensure(rb_yield, Qnil, trace_object_allocations_stop, self);
}

.trace_object_allocations_clear (mod_func)

Clears recorded tracing information.
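
For example, previously recorded information can be discarded between two phases of a measurement; warmup and measured_work below are hypothetical placeholders (a sketch):

require 'objspace'

ObjectSpace.trace_object_allocations_start
warmup                                       # hypothetical: allocations we do not care about
ObjectSpace.trace_object_allocations_clear   # drop recorded info; tracing itself stays enabled
measured_work                                # hypothetical: allocations we want to attribute
ObjectSpace.trace_object_allocations_stop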

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 318

static VALUE
trace_object_allocations_clear(VALUE self)
{
    struct traceobj_arg *arg = get_traceobj_arg();

    /* clear tables */
    st_foreach(arg->object_table, free_values_i, 0);
    st_clear(arg->object_table);
    st_foreach(arg->str_table, free_keys_i, 0);
    st_clear(arg->str_table);

    /* do not touch TracePoints */

    return Qnil;
}

.trace_object_allocations_debug_start (mod_func)

Starts object allocation tracing like .trace_object_allocations_start, with extra debugging behaviour: allocation information is retained even for objects that have already been freed, and a bug reporter is registered so that the recorded information is dumped if MRI crashes (intended for debugging MRI itself).

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 402

static VALUE
trace_object_allocations_debug_start(VALUE self)
{
    tmp_keep_remains = 1;
    if (object_allocations_reporter_registered == 0) {
        object_allocations_reporter_registered = 1;
        rb_bug_reporter_add(object_allocations_reporter, 0);
    }

    return trace_object_allocations_start(self);
}

.trace_object_allocations_start (mod_func)

Starts tracing object allocations.

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 262

static VALUE
trace_object_allocations_start(VALUE self)
{
    struct traceobj_arg *arg = get_traceobj_arg();

    if (arg->running++ > 0) {
        /* do nothing */
    }
    else {
        if (arg->newobj_trace == 0) {
            arg->newobj_trace = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, newobj_i, arg);
            arg->freeobj_trace = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_FREEOBJ, freeobj_i, arg);
        }
        rb_tracepoint_enable(arg->newobj_trace);
        rb_tracepoint_enable(arg->freeobj_trace);
    }

    return Qnil;
}

.trace_object_allocations_stop (mod_func)

Stops tracing object allocations.

Note that if .trace_object_allocations_start is called n times, tracing will only stop after .trace_object_allocations_stop has also been called n times.
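
A sketch of the nesting behaviour described above:

require 'objspace'

ObjectSpace.trace_object_allocations_start  # nesting level 1: tracing enabled
ObjectSpace.trace_object_allocations_start  # nesting level 2
obj = Object.new
ObjectSpace.trace_object_allocations_stop   # back to level 1: still tracing
ObjectSpace.trace_object_allocations_stop   # level 0: tracing actually stops here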

[ GitHub ]

  
# File 'ext/objspace/object_tracing.c', line 291

static VALUE
trace_object_allocations_stop(VALUE self)
{
    struct traceobj_arg *arg = get_traceobj_arg();

    if (arg->running > 0) {
        arg->running--;
    }

    if (arg->running == 0) {
        if (arg->newobj_trace != 0) {
            rb_tracepoint_disable(arg->newobj_trace);
        }
        if (arg->freeobj_trace != 0) {
            rb_tracepoint_disable(arg->freeobj_trace);
        }
    }

    return Qnil;
}