name : falcon.patch
diff --git a/Changelog.backport_gc b/Changelog.backport_gc
new file mode 100644
index 0000000..b617fc8
--- /dev/null
+++ b/Changelog.backport_gc
@@ -0,0 +1,128 @@
+Tue Jan 17 12:32:46 2012  Nobuyoshi Nakada  <nobu@ruby-lang.org>
+
+	* gc.c (aligned_malloc, aligned_free): covered missing defined
+	  operators and fixes for cygwin.
+
+Wed Jan 11 22:52:51 2012  CHIKANAGA Tomoyuki  <nagachika00@gmail.com>
+
+	* gc.c (ruby_mimmalloc): don't set allocated size to header.
+	  ruby_mimmalloc() doesn't increment allocated_size/allocations, so
+	  decrementing them in ruby_xfree() causes an inconsistency.
+
+	* gc.c (ruby_xfree): don't decrement allocated_size/allocations if
+	  allocated size record is 0.
+
+Tue Jan 10 15:13:58 2012  NARUSE, Yui  <naruse@ruby-lang.org>
+
+	* ext/readline/readline.c (readline_attempted_completion_function):
+	  use rb_memerror().
+
+Tue Jan 10 12:44:11 2012  NARUSE, Yui  <naruse@ruby-lang.org>
+
+	* gc.c (ruby_mimmalloc): defined for objects that don't need
+	  rb_objspace but should return a pointer suitable for ruby_xfree;
+	  used for the main VM and main thread.
+	  patched by Sokolov Yura. https://github.com/ruby/ruby/pull/79
+
+	* internal.h: ditto.
+
+	* vm.c (Init_BareVM): use ruby_mimmalloc.
+
+	* ext/dl/cfunc.c: #include <ruby/util.h>.
+
+	* ext/syslog/syslog.c: use xfree because it is allocated by
+	  ruby_strdup.
+
+Tue Jan 10 12:13:56 2012  Kazuhiro NISHIYAMA  <zn@mbf.nifty.com>
+
+	* ext/readline/readline.c (readline_attempted_completion_function):
+	  fix compile error.
+
+Tue Jan 10 10:41:11 2012  Nobuyoshi Nakada  <nobu@ruby-lang.org>
+
+	* ext/readline/readline.c (readline_attempted_completion_function):
+	  empty completion result does not mean memory error.
+
+Mon Jan  9 23:37:43 2012  CHIKANAGA Tomoyuki  <nagachika00@gmail.com>
+
+	* ext/readline/readline.c (readline_attempted_completion_function):
+	  fix typos.
+
+Mon Jan  9 20:55:34 2012  Narihiro Nakamura  <authornari@gmail.com>
+
+	* gc.c : don't embed struct heaps_slot in a heap block because it
+	  can cause copy-on-write of the heap block's memory page when its
+	  free_next is rewritten.
+
+Mon Jan  9 14:42:41 2012  Narihiro Nakamura  <authornari@gmail.com>
+
+	* gc.c: free_slots is changed to a singly linked list. Clear
+	  free_slots before sweep.
+
+Mon Jan  9 04:24:59 2012  NARUSE, Yui  <naruse@ruby-lang.org>
+
+	* gc.c (rb_objspace_free): global_List is allocated with xmalloc.
+	  patched by Sokolov Yura.  https://github.com/ruby/ruby/pull/78
+
+	* dln_find.c: remove useless replacement of free.
+
+	* ext/readline/readline.c (readline_attempted_completion_function):
+	  strings for readline must be allocated with malloc.
+
+	* process.c (run_exec_dup2): use free; see also r20950.
+
+	* re.c (onig_new_with_source): use malloc for oniguruma.
+
+	* vm.c (ruby_vm_destruct): use free for VMs.
+
+	* vm.c (thread_free): use free for threads.
+
+Mon Jan  9 04:24:59 2012  NARUSE, Yui  <naruse@ruby-lang.org>
+
+	* dln_find.c: remove useless replacement of free.
+
+	* ext/readline/readline.c (filename_completion_proc_call):
+	  matches should use xfree.
+
+	* ext/readline/readline.c (username_completion_proc_call): ditto.
+
+Sun Jan  8 20:31:45 2012  Narihiro Nakamura  <narihiro@netlab.jp>
+
+	* gc.c : consider header bytes which are used by malloc.
+
+Sun Jan  8 11:54:43 2012  Narihiro Nakamura  <authornari@gmail.com>
+
+	* gc.c (aligned_free): support MinGW. Patch by Hiroshi Shirosaki.
+
+Sun Jan  8 11:43:05 2012  Narihiro Nakamura  <authornari@gmail.com>
+
+	* gc.c (slot_sweep): add an assertion instead of a debug print.
+
+Sun Jan  8 00:46:34 2012  KOSAKI Motohiro  <kosaki.motohiro@gmail.com>
+
+	* gc.c: get rid of implicit narrowing conversion.
+
+Sun Jan  8 00:10:10 2012  NARUSE, Yui  <naruse@ruby-lang.org>
+
+	* configure.in: check posix_memalign(3) and memalign(3).
+
+	* gc.c (aligned_malloc): use configure's result instead of
+	  _POSIX_C_SOURCE and _XOPEN_SOURCE because they can't be used
+	  to check availability at least on FreeBSD.
+
+Sat Jan  7 22:25:50 2012  Narihiro Nakamura  <authornari@gmail.com>
+
+	* gc.c: use Bitmap Marking algorithm to avoid copy-on-write of
+	  memory pages. See [ruby-dev:45085] [Feature #5839]
+	  [ruby-core:41916].
+
+	* include/ruby/ruby.h : rename FL_MARK to FL_RESERVED1.
+
+	* node.h : ditto.
+
+	* debug.c : ditto.
+
+	* object.c (rb_obj_clone): FL_MARK moved to a bitmap.
+
+	* class.c (rb_singleton_class_clone): ditto.
+
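
Note: the last changelog entry above is the core of this patch. GC mark bits move out of each object's flags word (the old FL_MARK) into a bitmap kept alongside the heap slot, so marking no longer writes into the pages that hold the objects and forked child processes keep sharing them copy-on-write. A minimal sketch of the bit addressing behind the BITMAP_INDEX/BITMAP_OFFSET/MARK_IN_BITMAP macros added to gc.c further down; the 40-byte object size is only an illustrative stand-in for sizeof(RVALUE):

#include <stdint.h>
#include <stddef.h>

#define ALIGN_LOG  15                               /* heap slabs are aligned to 1 << ALIGN_LOG bytes */
#define ALIGN_MASK (~(~0UL << ALIGN_LOG))
#define OBJ_SIZE   40                               /* stand-in for sizeof(RVALUE) */

/* index of an object within its aligned slab */
static size_t num_in_slot(const void *p) { return ((uintptr_t)p & ALIGN_MASK) / OBJ_SIZE; }
static size_t bit_index(const void *p)   { return num_in_slot(p) / (sizeof(uintptr_t) * 8); }
static size_t bit_offset(const void *p)  { return num_in_slot(p) & (sizeof(uintptr_t) * 8 - 1); }

/* set / test the mark bit for p in the slab's bitmap instead of touching p itself */
static void mark(uintptr_t *bits, const void *p)         { bits[bit_index(p)] |= (uintptr_t)1 << bit_offset(p); }
static int  marked(const uintptr_t *bits, const void *p) { return (bits[bit_index(p)] >> bit_offset(p)) & 1; }
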
diff --git a/class.c b/class.c
index df19812..56a6f6f 100644
--- a/class.c
+++ b/class.c
@@ -186,7 +186,7 @@ rb_mod_init_copy(VALUE clone, VALUE orig)
 	    rb_free_const_table(RCLASS_CONST_TBL(clone));
 	}
 	RCLASS_CONST_TBL(clone) = st_init_numtable();
-	st_foreach(RCLASS_CONST_TBL(orig), clone_const_i, (st_data_t)RCLASS_CONST_TBL(clone));
+	st_foreach_nocheck(RCLASS_CONST_TBL(orig), clone_const_i, (st_data_t)RCLASS_CONST_TBL(clone));
     }
     if (RCLASS_M_TBL(orig)) {
 	struct clone_method_data data;
@@ -196,7 +196,7 @@ rb_mod_init_copy(VALUE clone, VALUE orig)
 	}
 	data.tbl = RCLASS_M_TBL(clone) = st_init_numtable();
 	data.klass = clone;
-	st_foreach(RCLASS_M_TBL(orig), clone_method,
+	st_foreach_nocheck(RCLASS_M_TBL(orig), clone_method,
 		   (st_data_t)&data);
     }
 
@@ -229,7 +229,7 @@ rb_singleton_class_clone(VALUE obj)
     else {
 	struct clone_method_data data;
 	/* copy singleton(unnamed) class */
-	VALUE clone = class_alloc((RBASIC(klass)->flags & ~(FL_MARK)), 0);
+	VALUE clone = class_alloc(RBASIC(klass)->flags, 0);
 
 	if (BUILTIN_TYPE(obj) == T_CLASS) {
 	    RBASIC(clone)->klass = (VALUE)clone;
@@ -244,12 +244,12 @@ rb_singleton_class_clone(VALUE obj)
 	}
 	if (RCLASS_CONST_TBL(klass)) {
 	    RCLASS_CONST_TBL(clone) = st_init_numtable();
-	    st_foreach(RCLASS_CONST_TBL(klass), clone_const_i, (st_data_t)RCLASS_CONST_TBL(clone));
+	    st_foreach_nocheck(RCLASS_CONST_TBL(klass), clone_const_i, (st_data_t)RCLASS_CONST_TBL(clone));
 	}
 	RCLASS_M_TBL(clone) = st_init_numtable();
 	data.tbl = RCLASS_M_TBL(clone);
 	data.klass = (VALUE)clone;
-	st_foreach(RCLASS_M_TBL(klass), clone_method,
+	st_foreach_nocheck(RCLASS_M_TBL(klass), clone_method,
 		   (st_data_t)&data);
 	rb_singleton_class_attached(RBASIC(clone)->klass, (VALUE)clone);
 	FL_SET(clone, FL_SINGLETON);
@@ -891,13 +891,13 @@ class_instance_method_list(int argc, VALUE *argv, VALUE mod, int obj, int (*func
 
     list = st_init_numtable();
     for (; mod; mod = RCLASS_SUPER(mod)) {
-	st_foreach(RCLASS_M_TBL(mod), method_entry_i, (st_data_t)list);
+	st_foreach_nocheck(RCLASS_M_TBL(mod), method_entry_i, (st_data_t)list);
 	if (BUILTIN_TYPE(mod) == T_ICLASS) continue;
 	if (obj && FL_TEST(mod, FL_SINGLETON)) continue;
 	if (!recur) break;
     }
     ary = rb_ary_new();
-    st_foreach(list, func, ary);
+    st_foreach_nocheck(list, func, ary);
     st_free_table(list);
 
     return ary;
@@ -1123,17 +1123,17 @@ rb_obj_singleton_methods(int argc, VALUE *argv, VALUE obj)
     klass = CLASS_OF(obj);
     list = st_init_numtable();
     if (klass && FL_TEST(klass, FL_SINGLETON)) {
-	st_foreach(RCLASS_M_TBL(klass), method_entry_i, (st_data_t)list);
+	st_foreach_nocheck(RCLASS_M_TBL(klass), method_entry_i, (st_data_t)list);
 	klass = RCLASS_SUPER(klass);
     }
     if (RTEST(recur)) {
 	while (klass && (FL_TEST(klass, FL_SINGLETON) || TYPE(klass) == T_ICLASS)) {
-	    st_foreach(RCLASS_M_TBL(klass), method_entry_i, (st_data_t)list);
+	    st_foreach_nocheck(RCLASS_M_TBL(klass), method_entry_i, (st_data_t)list);
 	    klass = RCLASS_SUPER(klass);
 	}
     }
     ary = rb_ary_new();
-    st_foreach(list, ins_methods_i, ary);
+    st_foreach_nocheck(list, ins_methods_i, ary);
     st_free_table(list);
 
     return ary;
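
Note: throughout class.c (and most other call sites in this patch) st_foreach is replaced with st_foreach_nocheck. The st.c half of the change is not part of this excerpt, but the declaration added to include/ruby/st.h further down suggests it is an iteration variant that skips the per-entry "was this entry deleted or the table modified" validation st_foreach performs, which becomes relevant once small tables get the packed representation. Callbacks keep the standard st signature; a hypothetical counting callback, shown only to illustrate the calling convention:

#include <ruby.h>   /* st_data_t, ST_CONTINUE */

static int count_entry(st_data_t key, st_data_t value, st_data_t arg)
{
    size_t *n = (size_t *)arg;      /* user data passed through st_foreach_nocheck() */
    (void)key;
    (void)value;
    (*n)++;
    return ST_CONTINUE;             /* ST_STOP aborts the walk, ST_DELETE drops the entry */
}

/* size_t n = 0;
 * st_foreach_nocheck(tbl, count_entry, (st_data_t)&n); */
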
diff --git a/common.mk b/common.mk
index eb89a2b..59cdfe4 100644
--- a/common.mk
+++ b/common.mk
@@ -630,7 +630,8 @@ file.$(OBJEXT): {$(VPATH)}file.c $(RUBY_H_INCLUDES) {$(VPATH)}io.h \
 gc.$(OBJEXT): {$(VPATH)}gc.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
   {$(VPATH)}regex.h $(ENCODING_H_INCLUDES) $(VM_CORE_H_INCLUDES) \
   {$(VPATH)}gc.h {$(VPATH)}io.h {$(VPATH)}eval_intern.h {$(VPATH)}util.h \
-  {$(VPATH)}debug.h {$(VPATH)}internal.h {$(VPATH)}constant.h
+  {$(VPATH)}debug.h {$(VPATH)}internal.h {$(VPATH)}constant.h \
+  {$(VPATH)}pool_alloc.inc.h {$(VPATH)}pool_alloc.h
 hash.$(OBJEXT): {$(VPATH)}hash.c $(RUBY_H_INCLUDES) {$(VPATH)}util.h \
   $(ENCODING_H_INCLUDES)
 inits.$(OBJEXT): {$(VPATH)}inits.c $(RUBY_H_INCLUDES) \
@@ -693,7 +694,7 @@ signal.$(OBJEXT): {$(VPATH)}signal.c $(RUBY_H_INCLUDES) \
   $(VM_CORE_H_INCLUDES) {$(VPATH)}debug.h
 sprintf.$(OBJEXT): {$(VPATH)}sprintf.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
   {$(VPATH)}regex.h {$(VPATH)}vsnprintf.c $(ENCODING_H_INCLUDES)
-st.$(OBJEXT): {$(VPATH)}st.c $(RUBY_H_INCLUDES)
+st.$(OBJEXT): {$(VPATH)}st.c $(RUBY_H_INCLUDES) {$(VPATH)}pool_alloc.h
 strftime.$(OBJEXT): {$(VPATH)}strftime.c $(RUBY_H_INCLUDES) \
   {$(VPATH)}timev.h
 string.$(OBJEXT): {$(VPATH)}string.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
diff --git a/compile.c b/compile.c
index 61e0f32..4f05b26 100644
--- a/compile.c
+++ b/compile.c
@@ -452,7 +452,7 @@ validate_label(st_data_t name, st_data_t label, st_data_t arg)
 static void
 validate_labels(rb_iseq_t *iseq, st_table *labels_table)
 {
-    st_foreach(labels_table, validate_label, (st_data_t)iseq);
+    st_foreach_nocheck(labels_table, validate_label, (st_data_t)iseq);
     if (!NIL_P(iseq->compile_data->err_info)) {
 	rb_exc_raise(iseq->compile_data->err_info);
     }
diff --git a/configure.in b/configure.in
index d645aa7..e1f1760 100644
--- a/configure.in
+++ b/configure.in
@@ -1313,6 +1313,30 @@ main() {
 CFLAGS="$save_CFLAGS"])
 AC_DEFINE_UNQUOTED(GC_MARK_STACKFRAME_WORD, $rb_cv_gc_mark_stackframe_word)
 
+AS_CASE(["$target_os"],
+[openbsd*|darwin[15-9].*|darwin10.[012345]*], [
+  AC_CACHE_CHECK(for heap align log on openbsd/macos, rb_cv_page_size_log,
+    [rb_cv_page_size_log=no
+     for page_log in 12 13; do
+       AC_TRY_RUN([
+#include <math.h>
+#include <unistd.h>
+
+int
+main() {
+  if ((int)log2((double)sysconf(_SC_PAGESIZE)) != $page_log) return 1;
+  return 0;
+}
+       ],
+       rb_cv_page_size_log="$page_log"; break)
+     done])
+  if test $rb_cv_page_size_log != no; then
+    AC_DEFINE_UNQUOTED(USE_PAGESIZE_LOG, $rb_cv_page_size_log)
+  else
+    AC_DEFINE_UNQUOTED(USE_PAGESIZE_LOG, 12)
+  fi
+])
+
 
 dnl Checks for library functions.
 AC_TYPE_GETGROUPS
@@ -1413,7 +1437,8 @@ AC_CHECK_FUNCS(fmod killpg wait4 waitpid fork spawnv syscall __syscall chroot ge
 	      setsid telldir seekdir fchmod cosh sinh tanh log2 round\
 	      setuid setgid daemon select_large_fdset setenv unsetenv\
               mktime timegm gmtime_r clock_gettime gettimeofday poll ppoll\
-              pread sendfile shutdown sigaltstack dl_iterate_phdr)
+              pread sendfile shutdown sigaltstack dl_iterate_phdr \
+              posix_memalign memalign valloc)
 
 AC_CACHE_CHECK(for unsetenv returns a value, rb_cv_unsetenv_return_value,
   [AC_TRY_COMPILE([
diff --git a/debug.c b/debug.c
index dcc710b..b77be0e 100644
--- a/debug.c
+++ b/debug.c
@@ -32,8 +32,8 @@ const union {
         RUBY_ENC_CODERANGE_7BIT    = ENC_CODERANGE_7BIT,
         RUBY_ENC_CODERANGE_VALID   = ENC_CODERANGE_VALID,
         RUBY_ENC_CODERANGE_BROKEN  = ENC_CODERANGE_BROKEN,
-        RUBY_FL_MARK        = FL_MARK,
-        RUBY_FL_RESERVED    = FL_RESERVED,
+        RUBY_FL_RESERVED1   = FL_RESERVED1,
+        RUBY_FL_RESERVED2   = FL_RESERVED2,
         RUBY_FL_FINALIZE    = FL_FINALIZE,
         RUBY_FL_TAINT       = FL_TAINT,
         RUBY_FL_UNTRUSTED   = FL_UNTRUSTED,
diff --git a/dln.c b/dln.c
index 961592d..aadfdba 100644
--- a/dln.c
+++ b/dln.c
@@ -466,7 +466,7 @@ static void
 dln_print_undef(void)
 {
     fprintf(stderr, " Undefined symbols:\n");
-    st_foreach(undef_tbl, undef_print, NULL);
+    st_foreach_nocheck(undef_tbl, undef_print, NULL);
 }
 
 static void
@@ -660,7 +660,7 @@ load_1(int fd, long disp, const char *need_init)
 
 		data.name0 = sym->n_un.n_name;
 		data.name1 = sym[1].n_un.n_name;
-		st_foreach(reloc_tbl, reloc_repl, &data);
+		st_foreach_nocheck(reloc_tbl, reloc_repl, &data);
 
 		st_insert(undef_tbl, strdup(sym[1].n_un.n_name), NULL);
 		if (st_delete(undef_tbl, (st_data_t*)&key, NULL)) {
@@ -982,7 +982,7 @@ load_lib(const char *lib)
 	}
 	for (;;) {
 	    target_offset = -1;
-	    st_foreach(undef_tbl, search_undef, lib_tbl);
+	    st_foreach_nocheck(undef_tbl, search_undef, lib_tbl);
 	    if (target_offset == -1) break;
 	    if (load_1(fd, target_offset, 0) == -1) {
 		st_free_table(lib_tbl);
diff --git a/dln_find.c b/dln_find.c
index 7ce3a95..d9166fa 100644
--- a/dln_find.c
+++ b/dln_find.c
@@ -45,14 +45,6 @@ char *dln_argv0;
 # include <strings.h>
 #endif
 
-#ifndef xmalloc
-void *xmalloc();
-void *xcalloc();
-void *xrealloc();
-#endif
-
-#define free(x) xfree(x)
-
 #include <stdio.h>
 #if defined(_WIN32)
 #include "missing/file.h"
diff --git a/ext/dl/cfunc.c b/ext/dl/cfunc.c
index 66aebf2..70cf6c4 100644
--- a/ext/dl/cfunc.c
+++ b/ext/dl/cfunc.c
@@ -2,7 +2,8 @@
  * $Id$
  */
 
-#include <ruby.h>
+#include <ruby/ruby.h>
+#include <ruby/util.h>
 #include <errno.h>
 #include "dl.h"
 
diff --git a/ext/readline/readline.c b/ext/readline/readline.c
index 9066004..a1cad0e 100644
--- a/ext/readline/readline.c
+++ b/ext/readline/readline.c
@@ -713,7 +713,8 @@ readline_attempted_completion_function(const char *text, int start, int end)
 
 	    low = i1;
 	}
-	result[0] = ALLOC_N(char, low + 1);
+	result[0] = (char*)malloc(low + 1);
+	if (result[0]  == NULL) rb_memerror();
 	strncpy(result[0], result[1], low);
 	result[0][low] = '\0';
     }
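
Note: the readline hunk above switches the common-prefix string from ALLOC_N (Ruby's xmalloc) to a bare malloc plus an explicit rb_memerror() check. GNU Readline takes ownership of the strings in the completion array and releases them with free(), so they must come from the system allocator rather than Ruby's, which pairs with xfree and (under this patch's CALC_EXACT_MALLOC_SIZE accounting) prepends a size header. A small illustration of that contract; dup_for_readline is a hypothetical helper, not part of the patch:

#include <stdlib.h>
#include <string.h>

/* Duplicate a string for hand-off to readline: readline will free() it,
 * so it must not come from ruby_xmalloc()/ALLOC_N(). */
static char *dup_for_readline(const char *s)
{
    size_t len = strlen(s);
    char *copy = (char *)malloc(len + 1);
    if (copy == NULL) return NULL;   /* caller reports the allocation failure */
    memcpy(copy, s, len + 1);
    return copy;
}
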
diff --git a/ext/syck/emitter.c b/ext/syck/emitter.c
index af0d789..d4bc7cc 100644
--- a/ext/syck/emitter.c
+++ b/ext/syck/emitter.c
@@ -152,7 +152,7 @@ syck_emitter_st_free( SyckEmitter *e )
      */
     if ( e->anchors != NULL )
     {
-        st_foreach( e->anchors, syck_st_free_anchors, 0 );
+        st_foreach_nocheck( e->anchors, syck_st_free_anchors, 0 );
         st_free_table( e->anchors );
         e->anchors = NULL;
     }
diff --git a/ext/syck/rubyext.c b/ext/syck/rubyext.c
index 2ab2e49..e10e72b 100644
--- a/ext/syck/rubyext.c
+++ b/ext/syck/rubyext.c
@@ -754,11 +754,11 @@ syck_mark_parser(SyckParser *parser)
 
     if ( parser->anchors != NULL )
     {
-        st_foreach( parser->anchors, syck_st_mark_nodes, 0 );
+        st_foreach_nocheck( parser->anchors, syck_st_mark_nodes, 0 );
     }
     if ( parser->bad_anchors != NULL )
     {
-        st_foreach( parser->bad_anchors, syck_st_mark_nodes, 0 );
+        st_foreach_nocheck( parser->bad_anchors, syck_st_mark_nodes, 0 );
     }
 }
 
diff --git a/ext/syck/syck.c b/ext/syck/syck.c
index 94e3992..36ed8e7 100644
--- a/ext/syck/syck.c
+++ b/ext/syck/syck.c
@@ -223,7 +223,7 @@ syck_st_free( SyckParser *p )
 
     if ( p->bad_anchors != NULL )
     {
-        st_foreach( p->bad_anchors, syck_st_free_nodes, 0 );
+        st_foreach_nocheck( p->bad_anchors, syck_st_free_nodes, 0 );
         st_free_table( p->bad_anchors );
         p->bad_anchors = NULL;
     }
@@ -253,7 +253,7 @@ syck_free_parser( SyckParser *p )
      */
     if ( p->syms != NULL )
     {
-        st_foreach( p->syms, syck_st_free_syms, 0 );
+        st_foreach_nocheck( p->syms, syck_st_free_syms, 0 );
         st_free_table( p->syms );
         p->syms = NULL;
     }
diff --git a/ext/syslog/syslog.c b/ext/syslog/syslog.c
index f7d622e..912ba6a 100644
--- a/ext/syslog/syslog.c
+++ b/ext/syslog/syslog.c
@@ -49,7 +49,7 @@ static VALUE mSyslog_close(VALUE self)
 
     closelog();
 
-    free((void *)syslog_ident);
+    xfree((void *)syslog_ident);
     syslog_ident = NULL;
     syslog_options = syslog_facility = syslog_mask = -1;
     syslog_opened = 0;
diff --git a/gc.c b/gc.c
index c53bfd9..01c05f1 100644
--- a/gc.c
+++ b/gc.c
@@ -20,10 +20,12 @@
 #include "vm_core.h"
 #include "internal.h"
 #include "gc.h"
+#include "pool_alloc.h"
 #include "constant.h"
 #include <stdio.h>
 #include <setjmp.h>
 #include <sys/types.h>
+#include <assert.h>
 
 #ifdef HAVE_SYS_TIME_H
 #include <sys/time.h>
@@ -35,7 +37,12 @@
 
 #if defined _WIN32 || defined __CYGWIN__
 #include <windows.h>
+#elif defined(HAVE_POSIX_MEMALIGN)
+#elif defined(HAVE_MEMALIGN) || defined(HAVE_VALLOC)
+#include <malloc.h>
 #endif
+static void *aligned_malloc(size_t, size_t);
+static void aligned_free(void *);
 
 #ifdef HAVE_VALGRIND_MEMCHECK_H
 # include <valgrind/memcheck.h>
@@ -84,10 +91,12 @@ typedef struct {
     unsigned int initial_malloc_limit;
     unsigned int initial_heap_min_slots;
     unsigned int initial_free_min;
+#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
     int gc_stress;
+#endif
 } ruby_gc_params_t;
 
-ruby_gc_params_t initial_params = {
+static ruby_gc_params_t initial_params = {
     GC_MALLOC_LIMIT,
     HEAP_MIN_SLOTS,
     FREE_MIN,
@@ -103,7 +112,10 @@ ruby_gc_params_t initial_params = {
 int ruby_gc_debug_indent = 0;
 
 /* for GC profile */
+#ifndef GC_PROFILE_MORE_DETAIL
 #define GC_PROFILE_MORE_DETAIL 0
+#endif
+
 typedef struct gc_profile_record {
     double gc_time;
     double gc_mark_time;
@@ -301,17 +313,20 @@ typedef struct RVALUE {
 #endif
 
 struct heaps_slot {
-    void *membase;
-    RVALUE *slot;
-    size_t limit;
+    struct heaps_header *membase;
+    RVALUE *freelist;
     struct heaps_slot *next;
     struct heaps_slot *prev;
+    struct heaps_slot *free_next;
+    uintptr_t bits[1];
 };
 
-struct sorted_heaps_slot {
+struct heaps_header {
+    struct heaps_slot *base;
+    uintptr_t *bits;
     RVALUE *start;
     RVALUE *end;
-    struct heaps_slot *slot;
+    size_t limit;
 };
 
 struct gc_list {
@@ -319,7 +334,27 @@ struct gc_list {
     struct gc_list *next;
 };
 
+#ifndef CALC_EXACT_MALLOC_SIZE
 #define CALC_EXACT_MALLOC_SIZE 0
+#endif
+
+#ifdef POOL_ALLOC_API
+/* POOL ALLOC API */
+#define POOL_ALLOC_PART 1
+#include "pool_alloc.inc.h"
+#undef POOL_ALLOC_PART
+
+typedef struct pool_layout_t pool_layout_t;
+struct pool_layout_t {
+    pool_header
+      p6,  /* st_table && st_table_entry */
+      p11;  /* st_table.bins init size */
+} pool_layout = {
+    INIT_POOL(void*[6]),
+    INIT_POOL(void*[11])
+};
+static void pool_finalize_header(pool_header *header);
+#endif
 
 typedef struct rb_objspace {
     struct {
@@ -330,16 +365,20 @@ typedef struct rb_objspace {
 	size_t allocations;
 #endif
     } malloc_params;
+#ifdef POOL_ALLOC_API
+    pool_layout_t *pool_headers;
+#endif
     struct {
 	size_t increment;
 	struct heaps_slot *ptr;
 	struct heaps_slot *sweep_slots;
-	struct sorted_heaps_slot *sorted;
+	struct heaps_slot *free_slots;
+	struct heaps_header **sorted;
 	size_t length;
 	size_t used;
-	RVALUE *freelist;
+        struct heaps_slot *reserve_slots;
 	RVALUE *range[2];
-	RVALUE *freed;
+	struct heaps_header *freed;
 	size_t live_num;
 	size_t free_num;
 	size_t free_min;
@@ -350,6 +389,7 @@ typedef struct rb_objspace {
 	int dont_gc;
 	int dont_lazy_sweep;
 	int during_gc;
+	rb_atomic_t finalizing;
     } flags;
     struct {
 	st_table *table;
@@ -377,7 +417,11 @@ typedef struct rb_objspace {
 #define ruby_initial_gc_stress	initial_params.gc_stress
 int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
 #else
+#  ifdef POOL_ALLOC_API
+static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, &pool_layout, {HEAP_MIN_SLOTS}};
+#  else
 static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
+#  endif
 int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
 #endif
 #define malloc_limit		objspace->malloc_params.limit
@@ -385,13 +429,13 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
 #define heaps			objspace->heap.ptr
 #define heaps_length		objspace->heap.length
 #define heaps_used		objspace->heap.used
-#define freelist		objspace->heap.freelist
 #define lomem			objspace->heap.range[0]
 #define himem			objspace->heap.range[1]
 #define heaps_inc		objspace->heap.increment
 #define heaps_freed		objspace->heap.freed
 #define dont_gc 		objspace->flags.dont_gc
 #define during_gc		objspace->flags.during_gc
+#define finalizing		objspace->flags.finalizing
 #define finalizer_table 	objspace->final.table
 #define deferred_final_list	objspace->final.deferred
 #define mark_stack		objspace->markstack.buffer
@@ -403,6 +447,12 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
 #define initial_heap_min_slots	initial_params.initial_heap_min_slots
 #define initial_free_min	initial_params.initial_free_min
 
+#define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
+
+#define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
+
+#define HEAP_HEADER(p) ((struct heaps_header *)(p))
+
 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
 
 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
@@ -413,6 +463,10 @@ rb_objspace_alloc(void)
     memset(objspace, 0, sizeof(*objspace));
     malloc_limit = initial_malloc_limit;
     ruby_gc_stress = ruby_initial_gc_stress;
+#ifdef POOL_ALLOC_API
+    objspace->pool_headers = (pool_layout_t*) malloc(sizeof(pool_layout));
+    memcpy(objspace->pool_headers, &pool_layout, sizeof(pool_layout));
+#endif
 
     return objspace;
 }
@@ -465,6 +519,7 @@ rb_gc_set_params(void)
 static void gc_sweep(rb_objspace_t *);
 static void slot_sweep(rb_objspace_t *, struct heaps_slot *);
 static void gc_clear_mark_on_sweep_slots(rb_objspace_t *);
+static void aligned_free(void *);
 
 void
 rb_objspace_free(rb_objspace_t *objspace)
@@ -479,40 +534,64 @@ rb_objspace_free(rb_objspace_t *objspace)
 	struct gc_list *list, *next;
 	for (list = global_List; list; list = next) {
 	    next = list->next;
-	    free(list);
+	    xfree(list);
 	}
     }
+    if (objspace->heap.reserve_slots) {
+        struct heaps_slot *list, *next;
+        for (list = objspace->heap.reserve_slots; list; list = next) {
+            next = list->free_next;
+            free(list);
+        }
+    }
     if (objspace->heap.sorted) {
 	size_t i;
 	for (i = 0; i < heaps_used; ++i) {
-	    free(objspace->heap.sorted[i].slot->membase);
-	    free(objspace->heap.sorted[i].slot);
+            free(objspace->heap.sorted[i]->base);
+	    aligned_free(objspace->heap.sorted[i]);
 	}
 	free(objspace->heap.sorted);
 	heaps_used = 0;
 	heaps = 0;
     }
+#ifdef POOL_ALLOC_API
+    if (objspace->pool_headers) {
+        pool_finalize_header(&objspace->pool_headers->p6);
+        pool_finalize_header(&objspace->pool_headers->p11);
+        free(objspace->pool_headers);
+    }
+#endif
     free(objspace);
 }
 #endif
 
-/* tiny heap size */
-/* 32KB */
-/*#define HEAP_SIZE 0x8000 */
-/* 128KB */
-/*#define HEAP_SIZE 0x20000 */
-/* 64KB */
-/*#define HEAP_SIZE 0x10000 */
-/* 16KB */
-#define HEAP_SIZE 0x4000
-/* 8KB */
-/*#define HEAP_SIZE 0x2000 */
-/* 4KB */
-/*#define HEAP_SIZE 0x1000 */
-/* 2KB */
-/*#define HEAP_SIZE 0x800 */
-
-#define HEAP_OBJ_LIMIT (unsigned int)(HEAP_SIZE / sizeof(struct RVALUE))
+#ifndef HEAP_ALIGN_LOG
+#ifdef USE_PAGESIZE_LOG
+#define HEAP_ALIGN_LOG USE_PAGESIZE_LOG
+#else
+/* default tiny heap size: 16KB */
+#define HEAP_ALIGN_LOG 15
+#endif
+#endif
+#define HEAP_ALIGN (1UL << HEAP_ALIGN_LOG)
+#define HEAP_ALIGN_MASK (~(~0UL << HEAP_ALIGN_LOG))
+#define REQUIRED_SIZE_BY_MALLOC (sizeof(size_t) * 5)
+#define HEAP_SIZE (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC)
+#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
+
+#define HEAP_OBJ_LIMIT (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE))
+#define HEAP_BITMAP_LIMIT CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t)*8)
+#define HEAP_SLOT_SIZE (sizeof(struct heaps_slot) + (HEAP_BITMAP_LIMIT-1) * sizeof(uintptr_t))
+
+#define GET_HEAP_HEADER(x) (HEAP_HEADER(((uintptr_t)x) & ~(HEAP_ALIGN_MASK)))
+#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
+#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
+#define NUM_IN_SLOT(p) (((uintptr_t)p & HEAP_ALIGN_MASK)/sizeof(RVALUE))
+#define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * 8))
+#define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * 8)-1))
+#define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
+#define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p)))
+#define CLEAR_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] &= ~((uintptr_t)1 << BITMAP_OFFSET(p)))
 
 extern st_table *rb_class_tbl;
 
@@ -824,8 +903,10 @@ vm_xfree(rb_objspace_t *objspace, void *ptr)
     size_t size;
     ptr = ((size_t *)ptr) - 1;
     size = ((size_t*)ptr)[0];
-    objspace->malloc_params.allocated_size -= size;
-    objspace->malloc_params.allocations--;
+    if (size) {
+	objspace->malloc_params.allocated_size -= size;
+	objspace->malloc_params.allocations--;
+    }
 #endif
 
     free(ptr);
@@ -895,6 +976,46 @@ ruby_xfree(void *x)
 	vm_xfree(&rb_objspace, x);
 }
 
+#ifdef POOL_ALLOC_API
+/* POOL ALLOC API */
+#define POOL_ALLOC_PART 2
+#include "pool_alloc.inc.h"
+#undef POOL_ALLOC_PART
+
+void
+ruby_xpool_free(void *ptr)
+{
+    pool_free_entry((void**)ptr);
+}
+
+#define CONCRET_POOL_MALLOC(pnts) \
+void * ruby_xpool_malloc_##pnts##p () { \
+    return pool_alloc_entry(&rb_objspace.pool_headers->p##pnts ); \
+}
+CONCRET_POOL_MALLOC(6)
+CONCRET_POOL_MALLOC(11)
+#undef CONCRET_POOL_MALLOC
+
+#endif
+
+/* Mimic ruby_xmalloc, but does not need rb_objspace.
+ * Should return a pointer suitable for ruby_xfree.
+ */
+void *
+ruby_mimmalloc(size_t size)
+{
+    void *mem;
+#if CALC_EXACT_MALLOC_SIZE
+    size += sizeof(size_t);
+#endif
+    mem = malloc(size);
+#if CALC_EXACT_MALLOC_SIZE
+    /* set 0 for consistency of allocated_size/allocations */
+    ((size_t *)mem)[0] = 0;
+    mem = (size_t *)mem + 1;
+#endif
+    return mem;
+}
 
 /*
  *  call-seq:
@@ -985,70 +1106,128 @@ rb_gc_unregister_address(VALUE *addr)
     }
 }
 
-
 static void
 allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
 {
-    struct sorted_heaps_slot *p;
-    size_t size;
+    struct heaps_header **p;
+    struct heaps_slot *slot;
+    size_t size, add, i;
 
-    size = next_heaps_length*sizeof(struct sorted_heaps_slot);
+    size = next_heaps_length*sizeof(struct heaps_header *);
+    add = next_heaps_length - heaps_used;
 
     if (heaps_used > 0) {
-	p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size);
+	p = (struct heaps_header **)realloc(objspace->heap.sorted, size);
 	if (p) objspace->heap.sorted = p;
     }
     else {
-	p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size);
+	p = objspace->heap.sorted = (struct heaps_header **)malloc(size);
     }
 
     if (p == 0) {
 	during_gc = 0;
 	rb_memerror();
     }
-    heaps_length = next_heaps_length;
+
+    for (i = 0; i < add; i++) {
+        slot = (struct heaps_slot *)malloc(HEAP_SLOT_SIZE);
+        if (slot == 0) {
+            during_gc = 0;
+            rb_memerror();
+            return;
+        }
+        slot->free_next = objspace->heap.reserve_slots;
+        objspace->heap.reserve_slots = slot;
+    }
+}
+
+static void *
+aligned_malloc(size_t alignment, size_t size)
+{
+    void *res;
+
+#if defined __MINGW32__
+    res = __mingw_aligned_malloc(size, alignment);
+#elif defined _WIN32 && !defined __CYGWIN__
+    res = _aligned_malloc(size, alignment);
+#elif defined(HAVE_POSIX_MEMALIGN)
+    if (posix_memalign(&res, alignment, size) == 0) {
+        return res;
+    } else {
+        return NULL;
+    }
+#elif defined(HAVE_MEMALIGN)
+    res = memalign(alignment, size);
+#elif defined(HAVE_VALLOC)
+    res = valloc(size);
+#else
+#error no memalign function
+#endif
+    return res;
+}
+
+static void
+aligned_free(void *ptr)
+{
+#if defined __MINGW32__
+    __mingw_aligned_free(ptr);
+#elif defined _WIN32 && !defined __CYGWIN__
+    _aligned_free(ptr);
+#else
+    free(ptr);
+#endif
+}
+
+static void
+link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
+{
+    slot->free_next = objspace->heap.free_slots;
+    objspace->heap.free_slots = slot;
+}
+
+static void
+unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
+{
+    objspace->heap.free_slots = slot->free_next;
+    slot->free_next = NULL;
 }
 
 static void
 assign_heap_slot(rb_objspace_t *objspace)
 {
-    RVALUE *p, *pend, *membase;
+    RVALUE *p, *pend;
+    struct heaps_header *membase;
     struct heaps_slot *slot;
     size_t hi, lo, mid;
     size_t objs;
 
     objs = HEAP_OBJ_LIMIT;
-    p = (RVALUE*)malloc(HEAP_SIZE);
-    if (p == 0) {
-	during_gc = 0;
-	rb_memerror();
-    }
-    slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
-    if (slot == 0) {
-	xfree(p);
+    membase = (struct heaps_header*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
+    if (membase == 0) {
 	during_gc = 0;
 	rb_memerror();
     }
+    assert(objspace->heap.reserve_slots != NULL);
+    slot = objspace->heap.reserve_slots;
+    objspace->heap.reserve_slots = slot->free_next;
     MEMZERO((void*)slot, struct heaps_slot, 1);
 
     slot->next = heaps;
     if (heaps) heaps->prev = slot;
     heaps = slot;
 
-    membase = p;
+    p = (RVALUE*)((VALUE)membase + sizeof(struct heaps_header));
     if ((VALUE)p % sizeof(RVALUE) != 0) {
-	p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
-	if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
-	    objs--;
-	}
+       p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
+       objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE);
     }
 
     lo = 0;
     hi = heaps_used;
     while (lo < hi) {
-	register RVALUE *mid_membase;
+	register struct heaps_header *mid_membase;
 	mid = (lo + hi) / 2;
-	mid_membase = objspace->heap.sorted[mid].slot->membase;
+        mid_membase = objspace->heap.sorted[mid];
 	if (mid_membase < membase) {
 	    lo = mid + 1;
 	}
@@ -1060,14 +1239,16 @@ assign_heap_slot(rb_objspace_t *objspace)
 	}
     }
     if (hi < heaps_used) {
-	MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi);
-    }
-    objspace->heap.sorted[hi].slot = slot;
-    objspace->heap.sorted[hi].start = p;
-    objspace->heap.sorted[hi].end = (p + objs);
+	MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct heaps_header*, heaps_used - hi);
+    }
+    objspace->heap.sorted[hi] = membase;
+    membase->start = p;
+    membase->end = (p + objs);
+    membase->base = heaps;
+    membase->bits = heaps->bits;
+    membase->limit = objs;
     heaps->membase = membase;
-    heaps->slot = p;
-    heaps->limit = objs;
+    memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
     objspace->heap.free_num += objs;
     pend = p + objs;
     if (lomem == 0 || lomem > p) lomem = p;
@@ -1076,19 +1257,24 @@ assign_heap_slot(rb_objspace_t *objspace)
 
     while (p < pend) {
 	p->as.free.flags = 0;
-	p->as.free.next = freelist;
-	freelist = p;
+	p->as.free.next = heaps->freelist;
+	heaps->freelist = p;
 	p++;
     }
+    link_free_heap_slot(objspace, heaps);
 }
 
 static void
 add_heap_slots(rb_objspace_t *objspace, size_t add)
 {
     size_t i;
+    size_t next_heaps_length;
 
-    if ((heaps_used + add) > heaps_length) {
-        allocate_sorted_heaps(objspace, heaps_used + add);
+    next_heaps_length = heaps_used + add;
+
+    if (next_heaps_length > heaps_length) {
+        allocate_sorted_heaps(objspace, next_heaps_length);
+        heaps_length = next_heaps_length;
     }
 
     for (i = 0; i < add; i++) {
@@ -1138,6 +1324,7 @@ set_heaps_increment(rb_objspace_t *objspace)
 
     if (next_heaps_length > heaps_length) {
 	allocate_sorted_heaps(objspace, next_heaps_length);
+        heaps_length = next_heaps_length;
     }
 }
 
@@ -1160,6 +1347,7 @@ rb_during_gc(void)
 }
 
 #define RANY(o) ((RVALUE*)(o))
+#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
 
 VALUE
 rb_newobj(void)
@@ -1180,15 +1368,18 @@ rb_newobj(void)
 	}
     }
 
-    if (UNLIKELY(!freelist)) {
+    if (UNLIKELY(!has_free_object)) {
 	if (!gc_lazy_sweep(objspace)) {
 	    during_gc = 0;
 	    rb_memerror();
 	}
     }
 
-    obj = (VALUE)freelist;
-    freelist = freelist->as.free.next;
+    obj = (VALUE)objspace->heap.free_slots->freelist;
+    objspace->heap.free_slots->freelist = RANY(obj)->as.free.next;
+    if (objspace->heap.free_slots->freelist == NULL) {
+        unlink_free_heap_slot(objspace, objspace->heap.free_slots);
+    }
 
     MEMZERO((void*)obj, RVALUE, 1);
 #ifdef GC_DEBUG
@@ -1357,10 +1548,10 @@ gc_mark_all(rb_objspace_t *objspace)
 
     init_mark_stack(objspace);
     for (i = 0; i < heaps_used; i++) {
-	p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
+	p = objspace->heap.sorted[i]->start; pend = objspace->heap.sorted[i]->end;
 	while (p < pend) {
-	    if ((p->as.basic.flags & FL_MARK) &&
-		(p->as.basic.flags != FL_MARK)) {
+	    if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p) &&
+		p->as.basic.flags) {
 		gc_mark_children(objspace, (VALUE)p, 0);
 	    }
 	    p++;
@@ -1388,26 +1579,27 @@ static inline int
 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
 {
     register RVALUE *p = RANY(ptr);
-    register struct sorted_heaps_slot *heap;
+    register struct heaps_header *heap;
     register size_t hi, lo, mid;
 
     if (p < lomem || p > himem) return FALSE;
     if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
+    heap = GET_HEAP_HEADER(p);
 
     /* check if p looks like a pointer using bsearch*/
     lo = 0;
     hi = heaps_used;
     while (lo < hi) {
 	mid = (lo + hi) / 2;
-	heap = &objspace->heap.sorted[mid];
-	if (heap->start <= p) {
-	    if (p < heap->end)
-		return TRUE;
-	    lo = mid + 1;
-	}
-	else {
-	    hi = mid;
-	}
+        if (heap > objspace->heap.sorted[mid]) {
+            lo = mid + 1;
+        }
+        else if (heap < objspace->heap.sorted[mid]) {
+            hi = mid;
+        }
+        else {
+            return (p >= heap->start && p < heap->end) ? TRUE : FALSE;
+        }
     }
     return FALSE;
 }
@@ -1450,10 +1642,10 @@ struct mark_tbl_arg {
 };
 
 static int
-mark_entry(ID key, VALUE value, st_data_t data)
+mark_entry(st_data_t key, st_data_t value, st_data_t data)
 {
     struct mark_tbl_arg *arg = (void*)data;
-    gc_mark(arg->objspace, value, arg->lev);
+    gc_mark(arg->objspace, (VALUE)value, arg->lev);
     return ST_CONTINUE;
 }
 
@@ -1464,14 +1656,14 @@ mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
     if (!tbl || tbl->num_entries == 0) return;
     arg.objspace = objspace;
     arg.lev = lev;
-    st_foreach(tbl, mark_entry, (st_data_t)&arg);
+    st_foreach_nocheck(tbl, mark_entry, (st_data_t)&arg);
 }
 
 static int
-mark_key(VALUE key, VALUE value, st_data_t data)
+mark_key(st_data_t key, st_data_t value, st_data_t data)
 {
     struct mark_tbl_arg *arg = (void*)data;
-    gc_mark(arg->objspace, key, arg->lev);
+    gc_mark(arg->objspace, (VALUE)key, arg->lev);
     return ST_CONTINUE;
 }
 
@@ -1482,7 +1674,7 @@ mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
     if (!tbl) return;
     arg.objspace = objspace;
     arg.lev = lev;
-    st_foreach(tbl, mark_key, (st_data_t)&arg);
+    st_foreach_nocheck(tbl, mark_key, (st_data_t)&arg);
 }
 
 void
@@ -1492,11 +1684,11 @@ rb_mark_set(st_table *tbl)
 }
 
 static int
-mark_keyvalue(VALUE key, VALUE value, st_data_t data)
+mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
 {
     struct mark_tbl_arg *arg = (void*)data;
-    gc_mark(arg->objspace, key, arg->lev);
-    gc_mark(arg->objspace, value, arg->lev);
+    gc_mark(arg->objspace, (VALUE)key, arg->lev);
+    gc_mark(arg->objspace, (VALUE)value, arg->lev);
     return ST_CONTINUE;
 }
 
@@ -1507,7 +1699,7 @@ mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
     if (!tbl) return;
     arg.objspace = objspace;
     arg.lev = lev;
-    st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
+    st_foreach_nocheck(tbl, mark_keyvalue, (st_data_t)&arg);
 }
 
 void
@@ -1560,7 +1752,7 @@ mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
     if (!tbl) return;
     arg.objspace = objspace;
     arg.lev = lev;
-    st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
+    st_foreach_nocheck(tbl, mark_method_entry_i, (st_data_t)&arg);
 }
 
 static int
@@ -1573,7 +1765,7 @@ free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
 void
 rb_free_m_table(st_table *tbl)
 {
-    st_foreach(tbl, free_method_entry_i, 0);
+    st_foreach_nocheck(tbl, free_method_entry_i, 0);
     st_free_table(tbl);
 }
 
@@ -1592,7 +1784,7 @@ mark_const_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
     if (!tbl) return;
     arg.objspace = objspace;
     arg.lev = lev;
-    st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
+    st_foreach_nocheck(tbl, mark_const_entry_i, (st_data_t)&arg);
 }
 
 static int
@@ -1605,7 +1797,7 @@ free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
 void
 rb_free_const_table(st_table *tbl)
 {
-    st_foreach(tbl, free_const_entry_i, 0);
+    st_foreach_nocheck(tbl, free_const_entry_i, 0);
     st_free_table(tbl);
 }
 
@@ -1627,12 +1819,14 @@ static void
 gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev)
 {
     register RVALUE *obj;
+    register uintptr_t *bits;
 
     obj = RANY(ptr);
     if (rb_special_const_p(ptr)) return; /* special const not marked */
     if (obj->as.basic.flags == 0) return;       /* free cell */
-    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
-    obj->as.basic.flags |= FL_MARK;
+    bits = GET_HEAP_BITMAP(ptr);
+    if (MARKED_IN_BITMAP(bits, ptr)) return;  /* already marked */
+    MARK_IN_BITMAP(bits, ptr);
     objspace->heap.live_num++;
 
     if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check(STACKFRAME_FOR_GC_MARK))) {
@@ -1660,6 +1854,7 @@ static void
 gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
 {
     register RVALUE *obj = RANY(ptr);
+    register uintptr_t *bits;
 
     goto marking;		/* skip */
 
@@ -1667,8 +1862,9 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
     obj = RANY(ptr);
     if (rb_special_const_p(ptr)) return; /* special const not marked */
     if (obj->as.basic.flags == 0) return;       /* free cell */
-    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
-    obj->as.basic.flags |= FL_MARK;
+    bits = GET_HEAP_BITMAP(ptr);
+    if (MARKED_IN_BITMAP(bits, ptr)) return;  /* already marked */
+    MARK_IN_BITMAP(bits, ptr);
     objspace->heap.live_num++;
 
   marking:
@@ -1930,13 +2126,18 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
 
 static int obj_free(rb_objspace_t *, VALUE);
 
-static inline void
-add_freelist(rb_objspace_t *objspace, RVALUE *p)
+static inline struct heaps_slot *
+add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p)
 {
+    struct heaps_slot *slot;
+
     VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
     p->as.free.flags = 0;
-    p->as.free.next = freelist;
-    freelist = p;
+    slot = GET_HEAP_SLOT(p);
+    p->as.free.next = slot->freelist;
+    slot->freelist = p;
+
+    return slot;
 }
 
 static void
@@ -1946,17 +2147,13 @@ finalize_list(rb_objspace_t *objspace, RVALUE *p)
 	RVALUE *tmp = p->as.free.next;
 	run_final(objspace, (VALUE)p);
 	if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
-            if (objspace->heap.sweep_slots) {
-                p->as.free.flags = 0;
-            }
-            else {
+            add_slot_local_freelist(objspace, p);
+            if (!is_lazy_sweeping(objspace)) {
                 GC_PROF_DEC_LIVE_NUM;
-                add_freelist(objspace, p);
             }
 	}
 	else {
-	    struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
-	    slot->limit--;
+            GET_HEAP_HEADER(p)->limit--;
 	}
 	p = tmp;
     }
@@ -1977,22 +2174,23 @@ unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
     slot->next = NULL;
 }
 
-
 static void
 free_unused_heaps(rb_objspace_t *objspace)
 {
     size_t i, j;
-    RVALUE *last = 0;
+    struct heaps_header *last = 0;
 
     for (i = j = 1; j < heaps_used; i++) {
-	if (objspace->heap.sorted[i].slot->limit == 0) {
+	if (objspace->heap.sorted[i]->limit == 0) {
+            struct heaps_slot* h = objspace->heap.sorted[i]->base;
+            h->free_next = objspace->heap.reserve_slots;
+            objspace->heap.reserve_slots = h;
 	    if (!last) {
-		last = objspace->heap.sorted[i].slot->membase;
+                last = objspace->heap.sorted[i];
 	    }
 	    else {
-		free(objspace->heap.sorted[i].slot->membase);
+		aligned_free(objspace->heap.sorted[i]);
 	    }
-            free(objspace->heap.sorted[i].slot);
 	    heaps_used--;
 	}
 	else {
@@ -2004,70 +2202,84 @@ free_unused_heaps(rb_objspace_t *objspace)
     }
     if (last) {
 	if (last < heaps_freed) {
-	    free(heaps_freed);
+	    aligned_free(heaps_freed);
 	    heaps_freed = last;
 	}
 	else {
-	    free(last);
+	    aligned_free(last);
 	}
     }
 }
 
 static void
+gc_clear_slot_bits(struct heaps_slot *slot)
+{
+    memset(slot->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
+}
+
+static void
 slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
 {
     size_t free_num = 0, final_num = 0;
     RVALUE *p, *pend;
-    RVALUE *free = freelist, *final = deferred_final_list;
+    RVALUE *final = deferred_final_list;
     int deferred;
+    uintptr_t *bits;
 
-    p = sweep_slot->slot; pend = p + sweep_slot->limit;
+    p = sweep_slot->membase->start; pend = sweep_slot->membase->end;
+    bits = sweep_slot->bits;
     while (p < pend) {
-        if (!(p->as.basic.flags & FL_MARK)) {
-            if (p->as.basic.flags &&
-                ((deferred = obj_free(objspace, (VALUE)p)) ||
-		 (FL_TEST(p, FL_FINALIZE)))) {
-                if (!deferred) {
-                    p->as.free.flags = T_ZOMBIE;
-                    RDATA(p)->dfree = 0;
+        if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) {
+            if (p->as.basic.flags) {
+                if ((deferred = obj_free(objspace, (VALUE)p)) ||
+                    (FL_TEST(p, FL_FINALIZE))) {
+                    if (!deferred) {
+                        p->as.free.flags = T_ZOMBIE;
+                        RDATA(p)->dfree = 0;
+                    }
+                    p->as.free.next = deferred_final_list;
+                    deferred_final_list = p;
+                    assert(BUILTIN_TYPE(p) == T_ZOMBIE);
+                    final_num++;
+                }
+                else {
+                    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
+                    p->as.free.flags = 0;
+                    p->as.free.next = sweep_slot->freelist;
+                    sweep_slot->freelist = p;
+                    free_num++;
                 }
-                p->as.free.flags |= FL_MARK;
-                p->as.free.next = deferred_final_list;
-                deferred_final_list = p;
-                final_num++;
             }
             else {
-                add_freelist(objspace, p);
                 free_num++;
             }
         }
-        else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
-            /* objects to be finalized */
-            /* do nothing remain marked */
-        }
-        else {
-            RBASIC(p)->flags &= ~FL_MARK;
-        }
         p++;
     }
-    if (final_num + free_num == sweep_slot->limit &&
+    gc_clear_slot_bits(sweep_slot);
+    if (final_num + free_num == sweep_slot->membase->limit &&
         objspace->heap.free_num > objspace->heap.do_heap_free) {
         RVALUE *pp;
 
         for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
-	    RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
+	    RDATA(pp)->dmark = 0;
             pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
         }
-        sweep_slot->limit = final_num;
-        freelist = free;	/* cancel this page from freelist */
+        sweep_slot->membase->limit = final_num;
         unlink_heap_slot(objspace, sweep_slot);
     }
     else {
+        if (free_num > 0) {
+            link_free_heap_slot(objspace, sweep_slot);
+        }
+        else {
+            sweep_slot->free_next = NULL;
+        }
         objspace->heap.free_num += free_num;
     }
     objspace->heap.final_num += final_num;
 
-    if (deferred_final_list) {
+    if (deferred_final_list && !finalizing) {
         rb_thread_t *th = GET_THREAD();
         if (th) {
             RUBY_VM_SET_FINALIZER_INTERRUPT(th);
@@ -2079,7 +2291,7 @@ static int
 ready_to_gc(rb_objspace_t *objspace)
 {
     if (dont_gc || during_gc) {
-	if (!freelist) {
+	if (!has_free_object) {
             if (!heaps_increment(objspace)) {
                 set_heaps_increment(objspace);
                 heaps_increment(objspace);
@@ -2093,7 +2305,6 @@ ready_to_gc(rb_objspace_t *objspace)
 static void
 before_gc_sweep(rb_objspace_t *objspace)
 {
-    freelist = 0;
     objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
     objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT)  * 0.2);
     if (objspace->heap.free_min < initial_free_min) {
@@ -2102,6 +2313,7 @@ before_gc_sweep(rb_objspace_t *objspace)
     }
     objspace->heap.sweep_slots = heaps;
     objspace->heap.free_num = 0;
+    objspace->heap.free_slots = NULL;
 
     /* sweep unlinked method entries */
     if (GET_VM()->unlinked_method_entry_list) {
@@ -2138,7 +2350,7 @@ lazy_sweep(rb_objspace_t *objspace)
         next = objspace->heap.sweep_slots->next;
 	slot_sweep(objspace, objspace->heap.sweep_slots);
         objspace->heap.sweep_slots = next;
-        if (freelist) {
+        if (has_free_object) {
             during_gc = 0;
             return TRUE;
         }
@@ -2150,10 +2362,10 @@ static void
 rest_sweep(rb_objspace_t *objspace)
 {
     if (objspace->heap.sweep_slots) {
-       while (objspace->heap.sweep_slots) {
-           lazy_sweep(objspace);
-       }
-       after_gc_sweep(objspace);
+	while (objspace->heap.sweep_slots) {
+	    lazy_sweep(objspace);
+	}
+	after_gc_sweep(objspace);
     }
 }
 
@@ -2200,9 +2412,9 @@ gc_lazy_sweep(rb_objspace_t *objspace)
     }
 
     GC_PROF_SWEEP_TIMER_START;
-    if(!(res = lazy_sweep(objspace))) {
+    if (!(res = lazy_sweep(objspace))) {
         after_gc_sweep(objspace);
-        if(freelist) {
+        if (has_free_object) {
             res = TRUE;
             during_gc = 0;
         }
@@ -2235,12 +2447,17 @@ void
 rb_gc_force_recycle(VALUE p)
 {
     rb_objspace_t *objspace = &rb_objspace;
-    GC_PROF_DEC_LIVE_NUM;
-    if (RBASIC(p)->flags & FL_MARK) {
-        RANY(p)->as.free.flags = 0;
+    struct heaps_slot *slot;
+
+    if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) {
+        add_slot_local_freelist(objspace, (RVALUE *)p);
     }
     else {
-        add_freelist(objspace, (RVALUE *)p);
+        GC_PROF_DEC_LIVE_NUM;
+        slot = add_slot_local_freelist(objspace, (RVALUE *)p);
+        if (slot->free_next == NULL) {
+            link_free_heap_slot(objspace, slot);
+        }
     }
 }
 
@@ -2433,19 +2650,12 @@ static void
 gc_clear_mark_on_sweep_slots(rb_objspace_t *objspace)
 {
     struct heaps_slot *scan;
-    RVALUE *p, *pend;
 
     if (objspace->heap.sweep_slots) {
         while (heaps_increment(objspace));
         while (objspace->heap.sweep_slots) {
             scan = objspace->heap.sweep_slots;
-            p = scan->slot; pend = p + scan->limit;
-            while (p < pend) {
-                if (p->as.free.flags & FL_MARK && BUILTIN_TYPE(p) != T_ZOMBIE) {
-                    p->as.basic.flags &= ~FL_MARK;
-                }
-                p++;
-            }
+            gc_clear_slot_bits(scan);
             objspace->heap.sweep_slots = objspace->heap.sweep_slots->next;
         }
     }
@@ -2634,7 +2844,7 @@ static VALUE
 objspace_each_objects(VALUE arg)
 {
     size_t i;
-    RVALUE *membase = 0;
+    struct heaps_header *membase = 0;
     RVALUE *pstart, *pend;
     rb_objspace_t *objspace = &rb_objspace;
     struct each_obj_args *args = (struct each_obj_args *)arg;
@@ -2642,16 +2852,16 @@ objspace_each_objects(VALUE arg)
 
     i = 0;
     while (i < heaps_used) {
-	while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
+	while (0 < i && membase < objspace->heap.sorted[i-1])
 	    i--;
-	while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
+	while (i < heaps_used && objspace->heap.sorted[i] <= membase)
 	    i++;
 	if (heaps_used <= i)
 	  break;
-	membase = objspace->heap.sorted[i].slot->membase;
+	membase = objspace->heap.sorted[i];
 
-	pstart = objspace->heap.sorted[i].slot->slot;
-	pend = pstart + objspace->heap.sorted[i].slot->limit;
+	pstart = membase->start;
+	pend = membase->end;
 
 	for (; pstart != pend; pstart++) {
 	    if (pstart->as.basic.flags) {
@@ -2665,6 +2875,7 @@ objspace_each_objects(VALUE arg)
 	    }
 	}
     }
+    RB_GC_GUARD(v);
 
     return Qnil;
 }
@@ -2908,11 +3119,12 @@ run_single_final(VALUE arg)
 }
 
 static void
-run_finalizer(rb_objspace_t *objspace, VALUE objid, VALUE table)
+run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
 {
     long i;
     int status;
     VALUE args[3];
+    VALUE objid = nonspecial_obj_id(obj);
 
     if (RARRAY_LEN(table) > 0) {
 	args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
@@ -2936,13 +3148,11 @@ run_finalizer(rb_objspace_t *objspace, VALUE objid, VALUE table)
 static void
 run_final(rb_objspace_t *objspace, VALUE obj)
 {
-    VALUE objid;
     RUBY_DATA_FUNC free_func = 0;
     st_data_t key, table;
 
     objspace->heap.final_num--;
 
-    objid = rb_obj_id(obj);	/* make obj into id */
     RBASIC(obj)->klass = 0;
 
     if (RTYPEDDATA_P(obj)) {
@@ -2957,7 +3167,7 @@ run_final(rb_objspace_t *objspace, VALUE obj)
 
     key = (st_data_t)obj;
     if (st_delete(finalizer_table, &key, &table)) {
-	run_finalizer(objspace, objid, (VALUE)table);
+	run_finalizer(objspace, obj, (VALUE)table);
     }
 }
 
@@ -2975,16 +3185,20 @@ finalize_deferred(rb_objspace_t *objspace)
 void
 rb_gc_finalize_deferred(void)
 {
-    finalize_deferred(&rb_objspace);
+    rb_objspace_t *objspace = &rb_objspace;
+    if (ATOMIC_EXCHANGE(finalizing, 1)) return;
+    finalize_deferred(objspace);
+    ATOMIC_SET(finalizing, 0);
 }
 
 static int
 chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg)
 {
     RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg;
-    if ((p->as.basic.flags & (FL_FINALIZE|FL_MARK)) == FL_FINALIZE) {
+    if ((p->as.basic.flags & FL_FINALIZE) == FL_FINALIZE &&
+        !MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) {
 	if (BUILTIN_TYPE(p) != T_ZOMBIE) {
-	    p->as.free.flags = FL_MARK | T_ZOMBIE; /* remain marked */
+	    p->as.free.flags = T_ZOMBIE;
 	    RDATA(p)->dfree = 0;
 	}
 	p->as.free.next = *final_list;
@@ -3027,22 +3241,25 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
     /* run finalizers */
     gc_clear_mark_on_sweep_slots(objspace);
 
+    if (ATOMIC_EXCHANGE(finalizing, 1)) return;
+
     do {
 	/* XXX: this loop will make no sense */
 	/* because mark will not be removed */
 	finalize_deferred(objspace);
 	mark_tbl(objspace, finalizer_table, 0);
-	st_foreach(finalizer_table, chain_finalized_object,
+	st_foreach_nocheck(finalizer_table, chain_finalized_object,
 		   (st_data_t)&deferred_final_list);
     } while (deferred_final_list);
     /* force to run finalizer */
     while (finalizer_table->num_entries) {
 	struct force_finalize_list *list = 0;
-	st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
+	st_foreach_nocheck(finalizer_table, force_chain_object, (st_data_t)&list);
 	while (list) {
 	    struct force_finalize_list *curr = list;
-	    run_finalizer(objspace, rb_obj_id(curr->obj), curr->table);
-	    st_delete(finalizer_table, (st_data_t*)&curr->obj, 0);
+	    st_data_t obj = (st_data_t)curr->obj;
+	    run_finalizer(objspace, curr->obj, curr->table);
+	    st_delete(finalizer_table, &obj, 0);
 	    list = curr->next;
 	    xfree(curr);
 	}
@@ -3053,7 +3270,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
 
     /* run data object's finalizers */
     for (i = 0; i < heaps_used; i++) {
-	p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
+	p = objspace->heap.sorted[i]->start; pend = objspace->heap.sorted[i]->end;
 	while (p < pend) {
 	    if (BUILTIN_TYPE(p) == T_DATA &&
 		DATA_PTR(p) && RANY(p)->as.data.dfree &&
@@ -3089,6 +3306,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
 
     st_free_table(finalizer_table);
     finalizer_table = 0;
+    ATOMIC_SET(finalizing, 0);
 }
 
 void
@@ -3096,10 +3314,42 @@ rb_gc(void)
 {
     rb_objspace_t *objspace = &rb_objspace;
     garbage_collect(objspace);
-    finalize_deferred(objspace);
+    if (!finalizing) finalize_deferred(objspace);
     free_unused_heaps(objspace);
 }
 
+static inline int
+is_id_value(rb_objspace_t *objspace, VALUE ptr)
+{
+    if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
+    if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
+    if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
+    return TRUE;
+}
+
+static inline int
+is_dead_object(rb_objspace_t *objspace, VALUE ptr)
+{
+    struct heaps_slot *slot = objspace->heap.sweep_slots;
+    if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr))
+	return FALSE;
+    while (slot) {
+	if ((VALUE)slot->membase->start <= ptr && ptr < (VALUE)(slot->membase->end))
+	    return TRUE;
+	slot = slot->next;
+    }
+    return FALSE;
+}
+
+static inline int
+is_live_object(rb_objspace_t *objspace, VALUE ptr)
+{
+    if (BUILTIN_TYPE(ptr) == 0) return FALSE;
+    if (RBASIC(ptr)->klass == 0) return FALSE;
+    if (is_dead_object(objspace, ptr)) return FALSE;
+    return TRUE;
+}
+
 /*
  *  call-seq:
  *     ObjectSpace._id2ref(object_id) -> an_object
@@ -3142,11 +3392,10 @@ id2ref(VALUE obj, VALUE objid)
 	return ID2SYM(symid);
     }
 
-    if (!is_pointer_to_heap(objspace, (void *)ptr) ||
-	BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
+    if (!is_id_value(objspace, ptr)) {
 	rb_raise(rb_eRangeError, "%p is not id value", p0);
     }
-    if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
+    if (!is_live_object(objspace, ptr)) {
 	rb_raise(rb_eRangeError, "%p is recycled object", p0);
     }
     return (VALUE)ptr;
@@ -3216,7 +3465,7 @@ rb_obj_id(VALUE obj)
     if (SPECIAL_CONST_P(obj)) {
         return LONG2NUM((SIGNED_VALUE)obj);
     }
-    return (VALUE)((SIGNED_VALUE)obj|FIXNUM_FLAG);
+    return nonspecial_obj_id(obj);
 }
 
 static int
@@ -3259,7 +3508,7 @@ count_objects(int argc, VALUE *argv, VALUE os)
     VALUE hash;
 
     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
-        if (TYPE(hash) != T_HASH)
+        if (!RB_TYPE_P(hash, T_HASH))
             rb_raise(rb_eTypeError, "non-hash given");
     }
 
@@ -3270,7 +3519,7 @@ count_objects(int argc, VALUE *argv, VALUE os)
     for (i = 0; i < heaps_used; i++) {
         RVALUE *p, *pend;
 
-        p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
+        p = objspace->heap.sorted[i]->start; pend = objspace->heap.sorted[i]->end;
         for (;p < pend; p++) {
             if (p->as.basic.flags) {
                 counts[BUILTIN_TYPE(p)]++;
@@ -3279,14 +3528,14 @@ count_objects(int argc, VALUE *argv, VALUE os)
                 freed++;
             }
         }
-        total += objspace->heap.sorted[i].slot->limit;
+        total += objspace->heap.sorted[i]->limit;
     }
 
     if (hash == Qnil) {
         hash = rb_hash_new();
     }
     else if (!RHASH_EMPTY_P(hash)) {
-        st_foreach(RHASH_TBL(hash), set_zero, hash);
+        st_foreach_nocheck(RHASH_TBL(hash), set_zero, hash);
     }
     rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
     rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
@@ -3378,7 +3627,7 @@ gc_stat(int argc, VALUE *argv, VALUE self)
     VALUE hash;
 
     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
-        if (TYPE(hash) != T_HASH)
+        if (!RB_TYPE_P(hash, T_HASH))
             rb_raise(rb_eTypeError, "non-hash given");
     }
 
@@ -3433,6 +3682,33 @@ gc_malloc_allocations(VALUE self)
 }
 #endif
 
+/*
+ *  call-seq:
+ *     GC::Profiler.raw_data -> [Hash, ...]
+ *
+ *  Returns an Array of individual raw profile data Hashes ordered
+ *  from earliest to latest by <tt>:GC_INVOKE_TIME</tt>.  For example:
+ *
+ *    [{:GC_TIME=>1.3000000000000858e-05,
+ *      :GC_INVOKE_TIME=>0.010634999999999999,
+ *      :HEAP_USE_SIZE=>289640,
+ *      :HEAP_TOTAL_SIZE=>588960,
+ *      :HEAP_TOTAL_OBJECTS=>14724,
+ *      :GC_IS_MARKED=>false},
+ *      ...
+ *    ]
+ *
+ *  The keys mean:
+ *
+ *  +:GC_TIME+:: Time elapsed in seconds for this GC run
+ *  +:GC_INVOKE_TIME+:: Time the GC was invoked since startup in seconds
+ *  +:HEAP_USE_SIZE+:: Bytes of heap used
+ *  +:HEAP_TOTAL_SIZE+:: Size of heap in bytes
+ *  +:HEAP_TOTAL_OBJECTS+:: Number of objects
+ *  +:GC_IS_MARKED+:: Whether the GC is in the mark phase
+ *
+ */
+
 static VALUE
 gc_profile_record_get(void)
 {
@@ -3625,6 +3901,7 @@ Init_GC(void)
     rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
     rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
     rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
+    rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
     rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
     rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
     rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
diff --git a/include/ruby/ruby.h b/include/ruby/ruby.h
index fc17f76..f63358a 100644
--- a/include/ruby/ruby.h
+++ b/include/ruby/ruby.h
@@ -919,8 +919,8 @@ struct RBignum {
 #define RCOMPLEX(obj) (R_CAST(RComplex)(obj))
 
 #define FL_SINGLETON FL_USER0
-#define FL_MARK      (((VALUE)1)<<5)
-#define FL_RESERVED  (((VALUE)1)<<6) /* will be used in the future GC */
+#define FL_RESERVED1 (((VALUE)1)<<5)
+#define FL_RESERVED2 (((VALUE)1)<<6) /* will be used in the future GC */
 #define FL_FINALIZE  (((VALUE)1)<<7)
 #define FL_TAINT     (((VALUE)1)<<8)
 #define FL_UNTRUSTED (((VALUE)1)<<9)
diff --git a/include/ruby/st.h b/include/ruby/st.h
index 50f2a75..d536c1d 100644
--- a/include/ruby/st.h
+++ b/include/ruby/st.h
@@ -74,6 +74,11 @@ struct st_hash_type {
 
 #define ST_INDEX_BITS (sizeof(st_index_t) * CHAR_BIT)
 
+typedef struct st_packed_entry {
+    st_index_t hash;
+    st_data_t key, val;
+} st_packed_entry;
+
 struct st_table {
     const struct st_hash_type *type;
     st_index_t num_bins;
@@ -91,8 +96,14 @@ struct st_table {
     __extension__
 #endif
     st_index_t num_entries : ST_INDEX_BITS - 1;
-    struct st_table_entry **bins;
-    struct st_table_entry *head, *tail;
+    union {
+	struct {
+	    struct st_table_entry **bins;
+	    struct st_table_entry *head, *tail;
+	} big;
+	struct st_packed_bins *packed;
+	struct st_packed_entry upacked;
+    } as;
 };
 
 #define st_is_member(table,key) st_lookup((table),(key),(st_data_t *)0)
@@ -114,6 +125,7 @@ int st_insert2(st_table *, st_data_t, st_data_t, st_data_t (*)(st_data_t));
 int st_lookup(st_table *, st_data_t, st_data_t *);
 int st_get_key(st_table *, st_data_t, st_data_t *);
 int st_foreach(st_table *, int (*)(ANYARGS), st_data_t);
+int st_foreach_nocheck(st_table *, int (*)(ANYARGS), st_data_t);
 int st_reverse_foreach(st_table *, int (*)(ANYARGS), st_data_t);
 void st_add_direct(st_table *, st_data_t, st_data_t);
 void st_free_table(st_table *);
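
The union above gives st_table three alternative representations that share the same three words of storage: a single inline entry (as.upacked), a small array of st_packed_entry (as.packed), and the classic chained bins plus insertion-order list (as.big). Below is a minimal standalone sketch of how st.c, further down in this patch, tells the three states apart; the struct and names are illustrative stand-ins, not code from the patch.

    #include <stdio.h>

    /* Illustrative model only.  In the patch the tests are (num_bins == 0)
     * for the "ultra packed" single-entry state and entries_packed for the
     * packed-array state; everything else uses the chained as.big fields. */
    enum repr { REPR_ULTRA_PACKED, REPR_PACKED, REPR_BIG };

    struct mini_table {
        unsigned entries_packed;   /* 1 while entries live in as.packed/as.upacked */
        unsigned long num_bins;    /* 0 encodes the single-entry state */
    };

    static enum repr
    table_repr(const struct mini_table *t)
    {
        if (t->num_bins == 0)  return REPR_ULTRA_PACKED;  /* ULTRA_PACKED(table) */
        if (t->entries_packed) return REPR_PACKED;
        return REPR_BIG;
    }

    int
    main(void)
    {
        struct mini_table fresh = { 1, 0 };   /* new table, holds at most one entry inline */
        struct mini_table small = { 1, 18 };  /* packed array, up to MAX_PACKED_HASH entries */
        struct mini_table big   = { 0, 11 };  /* unpacked, chained bins */
        printf("%d %d %d\n", table_repr(&fresh), table_repr(&small), table_repr(&big));
        return 0;
    }
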
diff --git a/internal.h b/internal.h
index 681e010..d879fb1 100644
--- a/internal.h
+++ b/internal.h
@@ -96,6 +96,7 @@ void Init_File(void);
 
 /* gc.c */
 void Init_heap(void);
+void *ruby_mimmalloc(size_t size);
 
 /* inits.c */
 void rb_call_inits(void);
@@ -112,6 +113,8 @@ VALUE rb_iseq_clone(VALUE iseqval, VALUE newcbase);
 
 /* load.c */
 VALUE rb_get_load_path(void);
+void rb_reset_expanded_cache();
+void rb_load_path_ary_push(VALUE path);
 
 /* math.c */
 VALUE rb_math_atan2(VALUE, VALUE);
diff --git a/load.c b/load.c
index 0ff4b60..b6a46fe 100644
--- a/load.c
+++ b/load.c
@@ -4,6 +4,7 @@
 
 #include "ruby/ruby.h"
 #include "ruby/util.h"
+#include "ruby/encoding.h"
 #include "internal.h"
 #include "dln.h"
 #include "eval_intern.h"
@@ -18,6 +19,7 @@ VALUE ruby_dln_librefs;
 #define IS_DLEXT(e) (strcmp((e), DLEXT) == 0)
 #endif
 
+static int sorted_loaded_features = 1;
 
 static const char *const loadable_ext[] = {
     ".rb", DLEXT,
@@ -27,28 +29,44 @@ static const char *const loadable_ext[] = {
     0
 };
 
-VALUE
-rb_get_load_path(void)
-{
-    VALUE load_path = GET_VM()->load_path;
-    return load_path;
-}
+static VALUE rb_checked_expanded_cache(int*);
+static void rb_set_expanded_cache(VALUE, int);
+static VALUE rb_expand_load_paths(long, VALUE*, int*);
+static int cached_expanded_load_path = 1;
 
 VALUE
 rb_get_expanded_load_path(void)
 {
-    VALUE load_path = rb_get_load_path();
-    VALUE ary;
-    long i;
+    VALUE expanded = rb_checked_expanded_cache(NULL);
 
-    ary = rb_ary_new2(RARRAY_LEN(load_path));
-    for (i = 0; i < RARRAY_LEN(load_path); ++i) {
-	VALUE path = rb_file_expand_path(RARRAY_PTR(load_path)[i], Qnil);
-	rb_str_freeze(path);
-	rb_ary_push(ary, path);
+    if ( !RTEST(expanded) ) {
+	VALUE load_path = GET_VM()->load_path;
+	int has_relative = 0;
+
+	if (!load_path) return 0;
+
+	expanded = rb_expand_load_paths(
+			RARRAY_LEN(load_path), RARRAY_PTR(load_path),
+			&has_relative);
+	RB_GC_GUARD(load_path);
+
+	if (cached_expanded_load_path) {
+	    rb_set_expanded_cache(expanded, has_relative);
+	}
+    } else {
+	expanded = rb_ary_dup(expanded);
     }
-    rb_obj_freeze(ary);
-    return ary;
+    return expanded;
+}
+
+VALUE
+rb_get_load_path(void)
+{
+    VALUE load_path =
+	cached_expanded_load_path ?
+	    rb_get_expanded_load_path():
+	    GET_VM()->load_path;
+    return load_path;
 }
 
 static VALUE
@@ -129,6 +147,9 @@ loaded_feature_path_i(st_data_t v, st_data_t b, st_data_t f)
     return ST_STOP;
 }
 
+static long rb_feature_first_equal_or_greater(VALUE, const char *, long);
+static int  rb_stop_search_feature(VALUE, const char *, long);
+
 static int
 rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const char **fn)
 {
@@ -151,8 +172,10 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
 	type = 0;
     }
     features = get_loaded_features();
-    for (i = 0; i < RARRAY_LEN(features); ++i) {
+    i = rb_feature_first_equal_or_greater(features, feature, len);
+    for (; i < RARRAY_LEN(features); ++i) {
 	v = RARRAY_PTR(features)[i];
+	if (rb_stop_search_feature(v, feature, len)) break;
 	f = StringValuePtr(v);
 	if ((n = RSTRING_LEN(v)) < len) continue;
 	if (strncmp(f, feature, len) != 0) {
@@ -176,16 +199,16 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
 	}
     }
     loading_tbl = get_loading_table();
-    if (loading_tbl) {
+    if (loading_tbl && loading_tbl->num_entries > 0) {
 	f = 0;
 	if (!expanded) {
 	    struct loaded_feature_searching fs;
 	    fs.name = feature;
 	    fs.len = len;
 	    fs.type = type;
-	    fs.load_path = load_path ? load_path : rb_get_load_path();
+	    fs.load_path = load_path ? load_path : rb_get_expanded_load_path();
 	    fs.result = 0;
-	    st_foreach(loading_tbl, loaded_feature_path_i, (st_data_t)&fs);
+	    st_foreach_nocheck(loading_tbl, loaded_feature_path_i, (st_data_t)&fs);
 	    if ((f = fs.result) != 0) {
 		if (fn) *fn = f;
 		goto loading;
@@ -251,6 +274,170 @@ rb_feature_provided(const char *feature, const char **loading)
     return FALSE;
 }
 
+static long
+feature_basename_length(const char *feature, long flen)
+{
+    if (sorted_loaded_features) {
+	const char *ext = strrchr(feature, '.');
+	return ext && !strchr(ext, '/') ? ext - feature : flen;
+    } else {
+	return 0;
+    }
+}
+
+static int
+compare_feature_name(const char *left, long llen, const char *right, long rlen)
+{
+    int diff = 0;
+    while (llen-- && rlen--) {
+	diff = left[llen] - right[rlen];
+	if (diff) break;
+	if (left[llen] == '/') break;
+    }
+    return diff;
+}
+
+static int
+rb_compare_feature_name(VALUE loaded, const char *feature, long flen)
+{
+    const char *loaded_name = StringValuePtr(loaded);
+    long loaded_len = feature_basename_length(loaded_name, RSTRING_LEN(loaded));
+    return compare_feature_name(loaded_name, loaded_len, feature, flen);
+}
+
+/* used to find when equal features run out */
+static int
+rb_stop_search_feature(VALUE loaded, const char *feature, long flen)
+{
+    if (sorted_loaded_features)
+	return rb_compare_feature_name(loaded, feature, flen) > 0;
+    else
+	return FALSE;
+}
+
+/* returns first position to search feature from */
+static long
+rb_feature_first_equal_or_greater(VALUE features, const char *feature, long flen)
+{
+    if (sorted_loaded_features) {
+	long before = 0, first = RARRAY_LEN(features);
+	VALUE *values = RARRAY_PTR(features);
+	if (first == 0)
+	    return 0;
+	if (rb_compare_feature_name(values[0], feature, flen) >= 0)
+	    return 0;
+
+	while (first - before > 1) {
+	    long mid = (first + before) / 2;
+	    long cmp = rb_compare_feature_name(values[mid], feature, flen);
+	    if (cmp >= 0)
+		first = mid;
+	    else
+		before = mid;
+	}
+	return first;
+    } else {
+	return 0;
+    }
+}
+
+/* returns position to insert new feature in */
+static long
+rb_feature_first_greater(VALUE features, const char *feature, long flen)
+{
+    if (sorted_loaded_features) {
+	long before = 0, first = RARRAY_LEN(features);
+	VALUE *values = RARRAY_PTR(features);
+	if (first == 0)
+	    return 0;
+	if (rb_compare_feature_name(values[0], feature, flen) > 0)
+	    return 0;
+	if (rb_compare_feature_name(values[first-1], feature, flen) <= 0)
+	    return first;
+
+	while (first - before > 1) {
+	    long mid = (first + before) / 2;
+	    long cmp = rb_compare_feature_name(values[mid], feature, flen);
+	    if (cmp > 0)
+		first = mid;
+	    else
+		before = mid;
+	}
+	return first;
+    } else {
+	return RARRAY_LEN(features);
+    }
+}
+
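
rb_feature_first_equal_or_greater() and rb_feature_first_greater() above are an ordinary lower-bound and upper-bound binary search over $LOADED_FEATURES, which this patch keeps sorted under a basename comparison that runs from the tail of each feature name back toward the last '/'. A minimal standalone sketch of the lower-bound variant, using strcmp over whole strings instead of the tail-wise basename comparison; the array, needle and function name are illustrative only.

    #include <stdio.h>
    #include <string.h>

    /* Sketch of the lower-bound search: index of the first element that
     * compares >= needle, or len when every element compares smaller. */
    static long
    first_equal_or_greater(const char **values, long len, const char *needle)
    {
        long before = 0, first = len;
        if (len == 0 || strcmp(values[0], needle) >= 0)
            return 0;
        /* invariant: values[before] < needle, and values[first] >= needle if first < len */
        while (first - before > 1) {
            long mid = (first + before) / 2;
            if (strcmp(values[mid], needle) >= 0)
                first = mid;
            else
                before = mid;
        }
        return first;
    }

    int
    main(void)
    {
        const char *features[] = { "abbrev.rb", "json.rb", "set.rb", "uri.rb" };
        long i = first_equal_or_greater(features, 4, "set.rb");
        if (i < 4)
            printf("start scanning at index %ld (%s)\n", i, features[i]);  /* 2 (set.rb) */
        return 0;
    }
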
+
+static VALUE
+rb_push_feature_1(VALUE features, VALUE feature)
+{
+    const char *fname = StringValuePtr(feature);
+    long flen = feature_basename_length(fname, RSTRING_LEN(feature));
+    long i = rb_feature_first_greater(features, fname, flen);
+    rb_ary_push(features, feature);
+    if ( i < RARRAY_LEN(features) - 1 ) {
+	MEMMOVE(RARRAY_PTR(features) + i + 1, RARRAY_PTR(features) + i,
+		VALUE, RARRAY_LEN(features) - i - 1);
+	RARRAY_PTR(features)[i] = feature;
+    }
+    return features;
+}
+
+static VALUE
+rb_push_feature_m(long argc, VALUE *argv, VALUE features)
+{
+    while (argc--) {
+	rb_push_feature_1(features, *argv++);
+    }
+    return features;
+}
+
+static VALUE
+rb_concat_features(VALUE features, VALUE add)
+{
+    add = rb_convert_type(add, T_ARRAY, "Array", "to_ary");
+    if (RARRAY_LEN(add)) {
+	rb_push_feature_m(RARRAY_LEN(add), RARRAY_PTR(add), features);
+    }
+    return features;
+}
+static const char *load_features_undefined_methods[] = {
+    "[]=", "reverse!", "rotate!", "sort!", "sort_by!",
+    "collect!", "map!", "shuffle!", "fill", "insert",
+    NULL
+};
+
+static VALUE
+rb_loaded_features_init(void)
+{
+    char *sorted_flag;
+    const char **name;
+    VALUE loaded_features = rb_ary_new();
+    VALUE loaded_features_c = rb_singleton_class(loaded_features);
+
+    sorted_flag = getenv("RUBY_LOADED_FEATURES_SORTED");
+    if (sorted_flag != NULL) {
+	int sorted_set = atoi(sorted_flag);
+	if (RTEST(ruby_verbose))
+	    fprintf(stderr, "sorted_loaded_features=%d (%d)\n", sorted_set, sorted_loaded_features);
+	sorted_loaded_features = sorted_set;
+    }
+
+    for(name = load_features_undefined_methods; *name; name++) {
+	rb_undef_method(loaded_features_c, *name);
+    }
+
+    if (sorted_loaded_features) {
+	rb_define_method(loaded_features_c, "<<", rb_push_feature_1, 1);
+	rb_define_method(loaded_features_c, "push", rb_push_feature_m, -1);
+	rb_define_method(loaded_features_c, "concat", rb_concat_features, 1);
+	rb_define_method(loaded_features_c, "unshift", rb_push_feature_m, -1);
+    }
+    return loaded_features;
+}
+
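
rb_loaded_features_init() above works entirely through per-object overrides: methods that would break the sorted order are removed from this one array's singleton class, and the remaining mutators are redirected to the insert-in-order helpers. The same trick is applied to $LOAD_PATH later in this file. A hedged sketch of the pattern for a hypothetical extension; rb_singleton_class, rb_undef_method, rb_define_method and rb_call_super are the real C API calls the patch relies on, everything else below is made up for illustration.

    #include "ruby.h"

    /* Hypothetical example: intercept push on one specific array only. */
    static VALUE
    tracked_push(int argc, VALUE *argv, VALUE self)
    {
        /* bookkeeping would go here (the patch inserts in sorted position instead) */
        return rb_call_super(argc, argv);        /* then fall through to Array#push */
    }

    void
    Init_tracked_array(void)
    {
        VALUE ary = rb_ary_new();
        VALUE ary_class = rb_singleton_class(ary);   /* affects only this one object */

        rb_undef_method(ary_class, "sort!");          /* forbid reordering */
        rb_define_method(ary_class, "push", tracked_push, -1);

        rb_define_global_const("TRACKED", ary);
    }
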
 static void
 rb_provide_feature(VALUE feature)
 {
@@ -258,7 +445,10 @@ rb_provide_feature(VALUE feature)
 	rb_raise(rb_eRuntimeError,
 		 "$LOADED_FEATURES is frozen; cannot append feature");
     }
-    rb_ary_push(get_loaded_features(), feature);
+    if (sorted_loaded_features)
+	rb_push_feature_1(get_loaded_features(), feature);
+    else
+	rb_ary_push(get_loaded_features(), feature);
 }
 
 void
@@ -760,6 +950,226 @@ rb_f_autoload_p(VALUE obj, VALUE sym)
     return rb_mod_autoload_p(klass, sym);
 }
 
+/* $LOAD_PATH methods which invalidate the cache */
+static const char *load_path_reset_cache_methods[] = {
+    "[]=", "collect!", "compact!", "delete",
+    "delete_if", "fill", "flatten!", "insert", "keep_if",
+    "map!", "reject!", "replace", "select!", "shuffle!",
+    "sort!", "sort_by!", "uniq!", NULL
+};
+
+/* $LOAD_PATH methods which are also forwarded to the cache */
+static const char *load_path_apply_to_cache_methods[] = {
+    "clear", "delete_at", "pop", "reverse!", "rotate!",
+    "shift", "slice!", NULL
+};
+
+/* $LOAD_PATH methods which are forwarded to the cache with expanded arguments */
+static const char *load_path_apply_expanded_methods[] = {
+    "<<", "push", "unshift", NULL
+};
+
+void
+rb_reset_expanded_cache()
+{
+    GET_VM()->load_path_expanded_cache = 0;
+}
+
+static VALUE
+rb_load_path_expanded_cache()
+{
+    VALUE cache = GET_VM()->load_path_expanded_cache;
+    VALUE expanded = Qnil;
+    if (RTEST(cache)) {
+	expanded = RARRAY_PTR(cache)[2];
+    }
+    return expanded;
+}
+
+/* Return the cache only if we are still in the same working directory
+ * and the filesystem encoding didn't change.
+ * Invalidate the cache otherwise.
+ */
+static VALUE
+rb_checked_expanded_cache(int *has_relative)
+{
+    VALUE cache = GET_VM()->load_path_expanded_cache;
+    VALUE expanded = Qnil;
+    if (RTEST(cache)) {
+	VALUE curwd = RARRAY_PTR(cache)[0];
+	VALUE encindex = RARRAY_PTR(cache)[1];
+	int cache_valid = rb_filesystem_encindex() == FIX2INT(encindex);
+
+	if ( cache_valid ) {
+	    cache_valid = curwd == Qtrue;
+	    if (has_relative) {
+		*has_relative = cache_valid;
+	    }
+	    if (!cache_valid ) {
+		char *cwd = my_getcwd();
+		cache_valid = !strcmp(RSTRING_PTR(curwd), cwd);
+		xfree(cwd);
+	    }
+	}
+
+	if ( !cache_valid ) {
+	    rb_reset_expanded_cache();
+	} else {
+	    expanded = RARRAY_PTR(cache)[2];
+	}
+    }
+    RB_GC_GUARD(cache);
+    return expanded;
+}
+
+static void
+rb_set_expanded_cache(VALUE expanded, int has_relative)
+{
+    VALUE cache = rb_ary_new2(3);
+
+    if (has_relative) {
+	char *cwd = my_getcwd();
+	rb_ary_push(cache, rb_str_new_cstr(cwd));
+	xfree(cwd);
+    } else {
+	rb_ary_push(cache, Qtrue);
+    }
+
+    rb_ary_push(cache, INT2FIX(rb_filesystem_encindex()));
+    rb_ary_push(cache, rb_ary_dup(expanded));
+    GET_VM()->load_path_expanded_cache = cache;
+}
+
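
The cache built by rb_set_expanded_cache() above is a plain three-element array: slot 0 holds either the working directory the expansion was done in or Qtrue when no relative path was involved, slot 1 holds rb_filesystem_encindex() at expansion time, and slot 2 holds a copy of the expanded paths. A small plain-C model of the validity rule that rb_checked_expanded_cache() applies; the struct and names are hypothetical and involve no Ruby objects.

    #include <string.h>

    /* Illustrative model of the test in rb_checked_expanded_cache(): the cached
     * expansion may be reused only when the filesystem encoding is unchanged and,
     * if any relative path was expanded, the working directory is unchanged too. */
    struct expanded_cache {
        int  has_relative;   /* the patch stores Qtrue in slot 0 when this is false */
        char cwd[256];       /* cwd at expansion time, meaningful only if has_relative */
        int  encindex;       /* rb_filesystem_encindex() at expansion time */
    };

    static int
    cache_still_valid(const struct expanded_cache *c, const char *cwd_now, int enc_now)
    {
        if (c->encindex != enc_now) return 0;
        if (!c->has_relative) return 1;          /* absolute paths never move */
        return strcmp(c->cwd, cwd_now) == 0;
    }
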
+static VALUE
+rb_expand_load_paths(long pathc, VALUE* paths, int *has_relative)
+{
+    long i;
+    const char *p;
+    VALUE path, expanded = rb_ary_new2(pathc);
+
+    for(i = 0; i < pathc; i++) {
+	path = rb_get_path(paths[i]);
+	p = RSTRING_PTR(path);
+	*has_relative = *has_relative || !rb_is_absolute_path(p);
+	path = rb_file_expand_path(path, Qnil);
+	rb_str_freeze(path);
+	rb_ary_push(expanded, path);
+    }
+
+    return expanded;
+}
+
+/* Invalidating $LOAD_PATH methods implementation */
+static VALUE
+rb_load_path_reset_cache_method(int argc, VALUE *argv, VALUE self)
+{
+    rb_reset_expanded_cache();
+    return rb_call_super(argc, argv);
+}
+
+/* Proxying $LOAD_PATH methods implementation */
+static VALUE
+rb_load_path_apply_to_cache_method(int argc, VALUE *argv, VALUE self)
+{
+    VALUE load_path_expanded = rb_load_path_expanded_cache();
+    if (RTEST(load_path_expanded)) {
+	ID func = rb_frame_this_func();
+	rb_funcall2(load_path_expanded, func, argc, argv);
+    }
+    return rb_call_super(argc, argv);
+}
+
+/* Proxying with expansion $LOAD_PATH methods implementation */
+static VALUE
+rb_load_path_apply_expanded_method(int argc, VALUE *argv, VALUE self)
+{
+    int old_has_relative = 0;
+    /* We call methods on the cache only if we are still in the same working directory */
+    VALUE load_path_expanded = rb_checked_expanded_cache(&old_has_relative);
+    if (RTEST(load_path_expanded)) {
+	int has_relative = 0;
+	ID func = rb_frame_this_func();
+	VALUE expanded = rb_expand_load_paths(argc, argv, &has_relative);
+
+	rb_funcall2(load_path_expanded, func, argc, RARRAY_PTR(expanded));
+
+	if (!old_has_relative && has_relative) {
+	    rb_set_expanded_cache(load_path_expanded, has_relative);
+	}
+	RB_GC_GUARD(expanded);
+    }
+    return rb_call_super(argc, argv);
+}
+/* $LOAD_PATH.concat(ary) is special-cased: we call push(*ary) instead,
+ * so the expansion logic in the method above doesn't have to be duplicated.
+ */
+static VALUE
+rb_load_path_concat(VALUE self, VALUE ary)
+{
+    ID push;
+    CONST_ID(push, "push");
+    RB_GC_GUARD(ary);
+    return rb_funcall2(self, push, (int)RARRAY_LEN(ary), RARRAY_PTR(ary));
+}
+
+void
+rb_load_path_ary_push(VALUE path)
+{
+    int old_has_relative = 0;
+    VALUE load_path_expanded = rb_checked_expanded_cache(&old_has_relative);
+    if (RTEST(load_path_expanded)) {
+	int has_relative = 0;
+	VALUE expanded = rb_expand_load_paths(1, &path, &has_relative);
+
+	rb_ary_push(load_path_expanded, RARRAY_PTR(expanded)[0]);
+
+	if (!old_has_relative && has_relative) {
+	    rb_set_expanded_cache(load_path_expanded, has_relative);
+	}
+	RB_GC_GUARD(expanded);
+    }
+
+    rb_ary_push(GET_VM()->load_path, path);
+}
+
+static VALUE
+rb_load_path_init(void)
+{
+    const char **name;
+    VALUE load_path = rb_ary_new();
+    char *cached_flag;
+
+    cached_flag = getenv("RUBY_CACHED_LOAD_PATH");
+    if (cached_flag != NULL) {
+	cached_expanded_load_path = atoi(cached_flag);
+    }
+
+    /* Do all the magic only if the user did not disable it
+     * with the RUBY_CACHED_LOAD_PATH=0 environment variable.
+     */
+    if (cached_expanded_load_path) {
+	VALUE load_path_c = rb_singleton_class(load_path);
+
+	for(name = load_path_reset_cache_methods; *name; name++ ) {
+	    rb_define_method(load_path_c, *name, rb_load_path_reset_cache_method, -1);
+	}
+
+	for(name = load_path_apply_to_cache_methods; *name; name++ ) {
+	    rb_define_method(load_path_c, *name, rb_load_path_apply_to_cache_method, -1);
+	}
+
+	for(name = load_path_apply_expanded_methods; *name; name++ ) {
+	    rb_define_method(load_path_c, *name, rb_load_path_apply_expanded_method, -1);
+	}
+
+	rb_define_method(load_path_c, "concat", rb_load_path_concat, 1);
+    }
+
+    rb_reset_expanded_cache();
+
+    return load_path;
+}
+
 void
 Init_load()
 {
@@ -772,11 +1182,11 @@ Init_load()
     rb_define_hooked_variable(var_load_path, (VALUE*)vm, load_path_getter, rb_gvar_readonly_setter);
     rb_alias_variable(rb_intern("$-I"), id_load_path);
     rb_alias_variable(rb_intern("$LOAD_PATH"), id_load_path);
-    vm->load_path = rb_ary_new();
+    vm->load_path = rb_load_path_init();
 
     rb_define_virtual_variable("$\"", get_loaded_features, 0);
     rb_define_virtual_variable("$LOADED_FEATURES", get_loaded_features, 0);
-    vm->loaded_features = rb_ary_new();
+    vm->loaded_features = rb_loaded_features_init();
 
     rb_define_global_function("load", rb_f_load, -1);
     rb_define_global_function("require", rb_f_require, 1);
diff --git a/marshal.c b/marshal.c
index 9a43cdb..a1b46f8 100644
--- a/marshal.c
+++ b/marshal.c
@@ -106,7 +106,7 @@ static void
 mark_marshal_compat_t(void *tbl)
 {
     if (!tbl) return;
-    st_foreach(tbl, mark_marshal_compat_i, 0);
+    st_foreach_nocheck(tbl, mark_marshal_compat_i, 0);
 }
 
 void
diff --git a/node.h b/node.h
index bb96107..37938ea 100644
--- a/node.h
+++ b/node.h
@@ -260,7 +260,7 @@ typedef struct RNode {
 
 #define RNODE(obj)  (R_CAST(RNode)(obj))
 
-/* 0..4:T_TYPES, 5:FL_MARK, 6:reserved, 7:NODE_FL_NEWLINE */
+/* 0..4:T_TYPES, 5:reserved, 6:reserved, 7:NODE_FL_NEWLINE */
 #define NODE_FL_NEWLINE (((VALUE)1)<<7)
 #define NODE_FL_CREF_PUSHED_BY_EVAL NODE_FL_NEWLINE
 
diff --git a/object.c b/object.c
index f45e013..b59e1a0 100644
--- a/object.c
+++ b/object.c
@@ -278,7 +278,7 @@ rb_obj_clone(VALUE obj)
     }
     clone = rb_obj_alloc(rb_obj_class(obj));
     RBASIC(clone)->klass = rb_singleton_class_clone(obj);
-    RBASIC(clone)->flags = (RBASIC(obj)->flags | FL_TEST(clone, FL_TAINT) | FL_TEST(clone, FL_UNTRUSTED)) & ~(FL_FREEZE|FL_FINALIZE|FL_MARK);
+    RBASIC(clone)->flags = (RBASIC(obj)->flags | FL_TEST(clone, FL_TAINT) | FL_TEST(clone, FL_UNTRUSTED)) & ~(FL_FREEZE|FL_FINALIZE);
     init_copy(clone, obj);
     rb_funcall(clone, id_init_clone, 1, obj);
     RBASIC(clone)->flags |= RBASIC(obj)->flags & FL_FREEZE;
diff --git a/parse.y b/parse.y
index b0da1b7..bf054c2 100644
--- a/parse.y
+++ b/parse.y
@@ -10040,7 +10040,7 @@ rb_sym_all_symbols(void)
 {
     VALUE ary = rb_ary_new2(global_symbols.sym_id->num_entries);
 
-    st_foreach(global_symbols.sym_id, symbols_i, ary);
+    st_foreach_nocheck(global_symbols.sym_id, symbols_i, ary);
     return ary;
 }
 
diff --git a/pool_alloc.h b/pool_alloc.h
new file mode 100644
index 0000000..957708e
--- /dev/null
+++ b/pool_alloc.h
@@ -0,0 +1,11 @@
+#ifndef POOL_ALLOC_H
+#define POOL_ALLOC_H
+
+#define POOL_ALLOC_API
+#ifdef POOL_ALLOC_API
+void  ruby_xpool_free(void *ptr);
+void *ruby_xpool_malloc_6p();
+void *ruby_xpool_malloc_11p();
+#endif
+
+#endif
diff --git a/pool_alloc.inc.h b/pool_alloc.inc.h
new file mode 100644
index 0000000..e06baba
--- /dev/null
+++ b/pool_alloc.inc.h
@@ -0,0 +1,152 @@
+/*
+ * This is a generic pool allocator.
+ * You should define the following macros:
+ * ITEM_NAME - unique identifier, which allows functions to be kept in a namespace
+ * ITEM_TYPEDEF(name) - passed to typedef to localize the item type
+ * free_entry - desired name of the function for freeing an entry
+ * alloc_entry - desired name of the function for allocating an entry
+ */
+
+#if POOL_ALLOC_PART == 1
+#define DEFAULT_POOL_SIZE 8192
+typedef unsigned int pool_holder_counter;
+
+typedef struct pool_entry_list pool_entry_list;
+typedef struct pool_holder pool_holder;
+
+typedef struct pool_header {
+    pool_holder         *first;
+    pool_holder         *_black_magick;
+    pool_holder_counter  size; // size of entry in sizeof(void*) items
+    pool_holder_counter  total; // number of entries per holder
+} pool_header;
+
+struct pool_holder {
+    pool_holder_counter free, total;
+    pool_header  *header;
+    void               *freep;
+    pool_holder        *fore, *back;
+    void *data[1];
+};
+#define POOL_DATA_SIZE(pool_size) (((pool_size) - sizeof(void*) * 6 - offsetof(pool_holder, data)) / sizeof(void*))
+#define POOL_ENTRY_SIZE(item_type) ((sizeof(item_type) - 1) / sizeof(void*) + 1)
+#define POOL_HOLDER_COUNT(pool_size, item_type) (POOL_DATA_SIZE(pool_size)/POOL_ENTRY_SIZE(item_type))
+#define INIT_POOL(item_type) {NULL, NULL, POOL_ENTRY_SIZE(item_type), POOL_HOLDER_COUNT(DEFAULT_POOL_SIZE, item_type)}
+
+#elif POOL_ALLOC_PART == 2
+static pool_holder *
+pool_holder_alloc(pool_header *header)
+{
+    pool_holder *holder;
+    pool_holder_counter i, size, count;
+    register void **ptr;
+
+    size_t sz = offsetof(pool_holder, data) +
+	    header->size * header->total * sizeof(void*);
+#define objspace (&rb_objspace)
+    vm_malloc_prepare(objspace, DEFAULT_POOL_SIZE);
+    if (header->first != NULL) return header->first;
+    TRY_WITH_GC(holder = (pool_holder*) aligned_malloc(DEFAULT_POOL_SIZE, sz));
+    malloc_increase += DEFAULT_POOL_SIZE;
+#if CALC_EXACT_MALLOC_SIZE
+    objspace->malloc_params.allocated_size += DEFAULT_POOL_SIZE;
+    objspace->malloc_params.allocations++;
+#endif
+#undef objspace
+
+    size = header->size;
+    count = header->total;
+    holder->free = count;
+    holder->total = count;
+    holder->header = header;
+    holder->fore = NULL;
+    holder->back = NULL;
+    holder->freep = &holder->data;
+    ptr = holder->data;
+    for(i = count - 1; i; i-- ) {
+	ptr = *ptr = ptr + size;
+    }
+    *ptr = NULL;
+    header->first = holder;
+    return holder;
+}
+
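
The loop at the end of pool_holder_alloc() threads an intrusive free list through data[]: each free slot's first word stores the address of the next slot, size words further on, and the last slot stores NULL; pool_alloc_entry() later pops slots off that list. A standalone sketch of the same threading over an ordinary malloc'ed block; nothing below is code from the patch, only the idea.

    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        size_t size = 3;     /* entry size in pointer words */
        size_t count = 4;    /* entries per holder */
        void **data = malloc(size * count * sizeof(void *));
        void **ptr = data, **freep = data;
        size_t i;

        /* thread the free list: slot i points at slot i+1, the last slot at NULL */
        for (i = count - 1; i; i--)
            ptr = *ptr = ptr + size;
        *ptr = NULL;

        /* pop two entries exactly the way pool_alloc_entry() does */
        void **a = freep; freep = *a;
        void **b = freep; freep = *b;
        printf("a=%p b=%p, b is %td words past a\n", (void *)a, (void *)b, b - a);

        free(data);
        return 0;
    }
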
+static inline void
+pool_holder_unchaing(pool_header *header, pool_holder *holder)
+{
+    register pool_holder *fore = holder->fore, *back = holder->back;
+    holder->fore = NULL;
+    holder->back = NULL;
+    if (fore != NULL)  fore->back     = back;
+    else               header->_black_magick = back;
+    if (back != NULL)  back->fore     = fore;
+    else               header->first = fore;
+}
+
+static inline pool_holder *
+entry_holder(void **entry)
+{
+    return (pool_holder*)(((uintptr_t)entry) & ~(DEFAULT_POOL_SIZE - 1));
+}
+
+static inline void
+pool_free_entry(void **entry)
+{
+    pool_holder *holder = entry_holder(entry);
+    pool_header *header = holder->header;
+
+    if (holder->free++ == 0) {
+	register pool_holder *first = header->first;
+	if (first == NULL) {
+	    header->first = holder;
+	} else {
+	    holder->back = first;
+	    holder->fore = first->fore;
+	    first->fore = holder;
+	    if (holder->fore)
+		holder->fore->back = holder;
+	    else
+		header->_black_magick = holder;
+	}
+    } else if (holder->free == holder->total && header->first != holder ) {
+	pool_holder_unchaing(header, holder);
+	aligned_free(holder);
+#if CALC_EXACT_MALLOC_SIZE
+	rb_objspace.malloc_params.allocated_size -= DEFAULT_POOL_SIZE;
+	rb_objspace.malloc_params.allocations--;
+#endif
+	return;
+    }
+
+    *entry = holder->freep;
+    holder->freep = entry;
+}
+
+static inline void*
+pool_alloc_entry(pool_header *header)
+{
+    pool_holder *holder = header->first;
+    void **result;
+    if (holder == NULL) {
+	holder = pool_holder_alloc(header);
+    }
+
+    result = holder->freep;
+    holder->freep = *result;
+
+    if (--holder->free == 0) {
+	pool_holder_unchaing(header, holder);
+    }
+
+    return result;
+}
+
+static void
+pool_finalize_header(pool_header *header)
+{
+    if (header->first) {
+        aligned_free(header->first);
+        header->first = NULL;
+    }
+}
+#endif
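
entry_holder() above recovers the owning pool_holder from a bare entry pointer by clearing the low bits of its address. That only works because every holder comes from aligned_malloc(DEFAULT_POOL_SIZE, sz), so it starts on an 8192-byte boundary that no entry inside it can cross. A minimal illustration of the mask trick using posix_memalign, one of the primitives the patch's aligned_malloc in gc.c falls back on depending on the platform.

    #define _POSIX_C_SOURCE 200112L
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define POOL_SIZE 8192   /* must stay a power of two for the mask to work */

    int
    main(void)
    {
        void *holder = NULL;
        if (posix_memalign(&holder, POOL_SIZE, POOL_SIZE) != 0) return 1;

        /* pretend this is some entry living inside the holder */
        char *entry = (char *)holder + 1234;

        /* entry_holder(): clear the low log2(POOL_SIZE) bits of the address */
        void *recovered = (void *)((uintptr_t)entry & ~(uintptr_t)(POOL_SIZE - 1));

        printf("holder=%p recovered=%p match=%d\n",
               holder, recovered, recovered == holder);
        free(holder);
        return 0;
    }
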
diff --git a/process.c b/process.c
index 2a16757..7a2201c 100644
--- a/process.c
+++ b/process.c
@@ -666,7 +666,7 @@ rb_waitpid(rb_pid_t pid, int *st, int flags)
 	    struct wait_data data;
 	    data.pid = (rb_pid_t)-1;
 	    data.status = -1;
-	    st_foreach(pid_tbl, wait_each, (st_data_t)&data);
+	    st_foreach_nocheck(pid_tbl, wait_each, (st_data_t)&data);
 	    if (data.status != -1) {
 		rb_last_status_set(data.status, data.pid);
 		return data.pid;
@@ -2162,11 +2162,11 @@ run_exec_dup2(VALUE ary, VALUE save, char *errmsg, size_t errmsg_buflen)
         }
     }
 
-    xfree(pairs);
+    free(pairs);
     return 0;
 
   fail:
-    xfree(pairs);
+    free(pairs);
     return -1;
 }
 
diff --git a/re.c b/re.c
index 9fdbf54..25467d7 100644
--- a/re.c
+++ b/re.c
@@ -769,7 +769,7 @@ onig_new_with_source(regex_t** reg, const UChar* pattern, const UChar* pattern_e
 {
   int r;
 
-  *reg = (regex_t* )xmalloc(sizeof(regex_t));
+  *reg = (regex_t* )malloc(sizeof(regex_t));
   if (IS_NULL(*reg)) return ONIGERR_MEMORY;
 
   r = onig_reg_init(*reg, option, ONIGENC_CASE_FOLD_DEFAULT, enc, syntax);
diff --git a/ruby.c b/ruby.c
index 3c97d01..b9b9fd5 100644
--- a/ruby.c
+++ b/ruby.c
@@ -209,7 +209,6 @@ push_include(const char *path, VALUE (*filter)(VALUE))
 {
     const char sep = PATH_SEP_CHAR;
     const char *p, *s;
-    VALUE load_path = GET_VM()->load_path;
 
     p = path;
     while (*p) {
@@ -217,7 +216,7 @@ push_include(const char *path, VALUE (*filter)(VALUE))
 	    p++;
 	if (!*p) break;
 	for (s = p; *s && *s != sep; s = CharNext(s));
-	rb_ary_push(load_path, (*filter)(rubylib_mangled_path(p, s - p)));
+	rb_load_path_ary_push((*filter)(rubylib_mangled_path(p, s - p)));
 	p = s;
     }
 }
@@ -338,7 +337,6 @@ ruby_init_loadpath(void)
 void
 ruby_init_loadpath_safe(int safe_level)
 {
-    VALUE load_path;
     ID id_initial_load_path_mark;
     extern const char ruby_initial_load_paths[];
     const char *paths = ruby_initial_load_paths;
@@ -438,7 +436,6 @@ ruby_init_loadpath_safe(int safe_level)
 #define RUBY_RELATIVE(path, len) rubylib_mangled_path((path), (len))
 #define PREFIX_PATH() RUBY_RELATIVE(exec_prefix, sizeof(exec_prefix)-1)
 #endif
-    load_path = GET_VM()->load_path;
 
     if (safe_level == 0) {
 #ifdef MANGLED_PATH
@@ -452,7 +449,7 @@ ruby_init_loadpath_safe(int safe_level)
 	size_t len = strlen(paths);
 	VALUE path = RUBY_RELATIVE(paths, len);
 	rb_ivar_set(path, id_initial_load_path_mark, path);
-	rb_ary_push(load_path, path);
+	rb_load_path_ary_push(path);
 	paths += len + 1;
     }
 
@@ -1349,6 +1346,7 @@ process_options(int argc, char **argv, struct cmdline_options *opt)
 	for (i = 0; i < RARRAY_LEN(load_path); ++i) {
 	    rb_enc_associate(RARRAY_PTR(load_path)[i], lenc);
 	}
+	rb_reset_expanded_cache();
     }
     if (!(opt->disable & DISABLE_BIT(gems))) {
 #if defined DISABLE_RUBYGEMS && DISABLE_RUBYGEMS
diff --git a/st.c b/st.c
index fda5784..675918d 100644
--- a/st.c
+++ b/st.c
@@ -7,6 +7,7 @@
 #include "st.h"
 #else
 #include "ruby/ruby.h"
+#include "pool_alloc.h"
 #endif
 
 #include <stdio.h>
@@ -25,8 +26,21 @@ struct st_table_entry {
     st_table_entry *fore, *back;
 };
 
+#define STATIC_ASSERT(name, expr) typedef int static_assert_##name##_check[(expr) ? 1 : -1];
+
 #define ST_DEFAULT_MAX_DENSITY 5
 #define ST_DEFAULT_INIT_TABLE_SIZE 11
+#define ST_DEFAULT_SECOND_TABLE_SIZE 19
+#define ST_DEFAULT_PACKED_TABLE_SIZE 18
+#define PACKED_UNIT (int)(sizeof(st_packed_entry) / sizeof(st_table_entry*))
+#define MAX_PACKED_HASH (int)(ST_DEFAULT_PACKED_TABLE_SIZE * sizeof(st_table_entry*) / sizeof(st_packed_entry))
+
+typedef struct st_packed_bins {
+    st_packed_entry kv[MAX_PACKED_HASH];
+} st_packed_bins;
+
+STATIC_ASSERT(st_packed_entry, sizeof(st_packed_entry) == sizeof(st_table_entry*[PACKED_UNIT]))
+STATIC_ASSERT(st_packed_bins, sizeof(st_packed_bins) <= sizeof(st_table_entry*[ST_DEFAULT_PACKED_TABLE_SIZE]))
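
All of the sizes above are expressed in pointer words, and the two STATIC_ASSERTs pin the layout down. In the common case where st_data_t and st_index_t are pointer-sized, an st_packed_entry occupies three words, so PACKED_UNIT is 3 and the 18-word packed area holds MAX_PACKED_HASH = 18 / 3 = 6 entries. A quick standalone check of that arithmetic, under the stated (assumed) field sizes; the typedefs below are stand-ins, not the real st.h ones.

    #include <stdio.h>
    #include <stdint.h>

    /* Assumption: st_data_t and st_index_t are pointer-sized, as on the usual
     * 32- and 64-bit builds.  The real definitions live in include/ruby/st.h. */
    typedef uintptr_t my_st_data_t;
    typedef uintptr_t my_st_index_t;
    typedef struct { my_st_index_t hash; my_st_data_t key, val; } my_packed_entry;

    #define PACKED_TABLE_SIZE 18   /* ST_DEFAULT_PACKED_TABLE_SIZE */
    #define PACKED_UNIT     (int)(sizeof(my_packed_entry) / sizeof(void *))
    #define MAX_PACKED_HASH (int)(PACKED_TABLE_SIZE * sizeof(void *) / sizeof(my_packed_entry))

    int
    main(void)
    {
        /* typically prints "PACKED_UNIT=3 MAX_PACKED_HASH=6" */
        printf("PACKED_UNIT=%d MAX_PACKED_HASH=%d\n", PACKED_UNIT, MAX_PACKED_HASH);
        return 0;
    }
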
 
     /*
      * DEFAULT_MAX_DENSITY is the default for the largest we allow the
@@ -38,7 +52,8 @@ struct st_table_entry {
      *
      */
 
-static const struct st_hash_type type_numhash = {
+#define type_numhash st_hashtype_num
+const struct st_hash_type st_hashtype_num = {
     st_numcmp,
     st_numhash,
 };
@@ -61,20 +76,99 @@ static void rehash(st_table *);
 #ifdef RUBY
 #define malloc xmalloc
 #define calloc xcalloc
+#define realloc xrealloc
 #define free(x) xfree(x)
 #endif
 
 #define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))
 
-#define alloc(type) (type*)malloc((size_t)sizeof(type))
-#define Calloc(n,s) (char*)calloc((n),(s))
-
 #define EQUAL(table,x,y) ((x)==(y) || (*(table)->type->compare)((x),(y)) == 0)
 
-/* remove cast to unsigned int in the future */
-#define do_hash(key,table) (unsigned int)(st_index_t)(*(table)->type->hash)((key))
+#define do_hash(key,table) (st_index_t)(*(table)->type->hash)((key))
 #define do_hash_bin(key,table) (do_hash((key), (table))%(table)->num_bins)
 
+/* preparation for possible allocation improvements */
+#ifdef POOL_ALLOC_API
+#define st_alloc_entry() (st_table_entry *)ruby_xpool_malloc_6p()
+#define st_free_entry(entry) ruby_xpool_free(entry)
+#define st_alloc_table() (st_table *)ruby_xpool_malloc_6p()
+#define st_dealloc_table(table) ruby_xpool_free(table)
+static inline st_table_entry **
+st_alloc_bins(st_index_t size)
+{
+    st_table_entry **result;
+    if (size == 11) {
+        result = (st_table_entry **) ruby_xpool_malloc_11p();
+        memset(result, 0, 11 * sizeof(st_table_entry *));
+    }
+    else
+        result = (st_table_entry **) ruby_xcalloc(size, sizeof(st_table_entry*));
+    return result;
+}
+static inline void
+st_free_bins(st_table_entry **bins, st_index_t size)
+{
+    if (size == 11)
+	ruby_xpool_free(bins);
+    else
+	ruby_xfree(bins);
+}
+static inline st_table_entry**
+st_realloc_bins(st_table_entry **bins, st_index_t newsize, st_index_t oldsize)
+{
+    st_free_bins(bins, oldsize);
+    return st_alloc_bins(newsize);
+}
+#else
+#define st_alloc_entry() (st_table_entry *)malloc(sizeof(st_table_entry))
+#define st_free_entry(entry) free(entry)
+#define st_alloc_table() (st_table *)malloc(sizeof(st_table))
+#define st_dealloc_table(table) free(table)
+#define st_alloc_bins(size) (st_table_entry **)calloc(size, sizeof(st_table_entry *))
+#define st_free_bins(bins, size) free(bins)
+static inline st_table_entry**
+st_realloc_bins(st_table_entry **bins, st_index_t newsize, st_index_t oldsize)
+{
+    bins = (st_table_entry **)realloc(bins, newsize * sizeof(st_table_entry *));
+    MEMZERO(bins, st_table_entry*, newsize);
+    return bins;
+}
+#endif
+
+/* preparation for possible packing improvements */
+#define PACKED_BINS(table) (*(table)->as.packed)
+#define PACKED_ENT(table, i) PACKED_BINS(table).kv[i]
+#define PKEY(table, i) PACKED_ENT((table), (i)).key
+#define PVAL(table, i) PACKED_ENT((table), (i)).val
+#define PHASH(table, i) PACKED_ENT((table), (i)).hash
+#define PKEY_SET(table, i, v) (PKEY((table), (i)) = (v))
+#define PVAL_SET(table, i, v) (PVAL((table), (i)) = (v))
+#define PHASH_SET(table, i, v) (PHASH((table), (i)) = (v))
+/* this function depends heavily on the packed layout, so it is placed here */
+static inline void
+remove_packed_entry(st_table *table, st_index_t i)
+{
+    table->num_entries--;
+    if (i < table->num_entries) {
+	MEMMOVE(&PACKED_ENT(table, i), &PACKED_ENT(table, i+1),
+		st_packed_entry, table->num_entries - i);
+    }
+}
+/* ultra packed values */
+#define MAX_ULTRA_PACKED 1
+#define ULTRA_PACKED(table) ((table)->num_bins == 0)
+#define UPHASH(table) (table)->as.upacked.hash
+#define UPKEY(table)  (table)->as.upacked.key
+#define UPVAL(table)  (table)->as.upacked.val
+#define UPHASH_SET(table, val) (UPHASH(table) = (val))
+#define UPKEY_SET(table, val) (UPKEY(table) = (val))
+#define UPVAL_SET(table, val) (UPVAL(table) = (val))
+
+/* Shorthand for the "big" representation */
+#define bins as.big.bins
+#define head as.big.head
+#define tail as.big.tail
+
 /*
  * MINSIZE is the minimum size of a dictionary.
  */
@@ -85,8 +179,8 @@ static void rehash(st_table *);
 Table of prime numbers 2^n+a, 2<=n<=30.
 */
 static const unsigned int primes[] = {
-	8 + 3,
-	16 + 3,
+	ST_DEFAULT_INIT_TABLE_SIZE,
+	ST_DEFAULT_SECOND_TABLE_SIZE,
 	32 + 5,
 	64 + 3,
 	128 + 3,
@@ -161,8 +255,6 @@ stat_col(void)
 }
 #endif
 
-#define MAX_PACKED_NUMHASH (ST_DEFAULT_INIT_TABLE_SIZE/2)
-
 st_table*
 st_init_table_with_size(const struct st_hash_type *type, st_index_t size)
 {
@@ -181,14 +273,19 @@ st_init_table_with_size(const struct st_hash_type *type, st_index_t size)
     }
 #endif
 
-    size = new_size(size);	/* round up to prime number */
 
-    tbl = alloc(st_table);
+    tbl = st_alloc_table();
     tbl->type = type;
     tbl->num_entries = 0;
-    tbl->entries_packed = type == &type_numhash && size/2 <= MAX_PACKED_NUMHASH;
+    if ( (tbl->entries_packed = size <= MAX_PACKED_HASH) ) {
+        size = size <= MAX_ULTRA_PACKED ? 0 :
+                ST_DEFAULT_PACKED_TABLE_SIZE;
+    }
+    else {
+        size = new_size(size);	/* round up to prime number */
+    }
     tbl->num_bins = size;
-    tbl->bins = (st_table_entry **)Calloc(size, sizeof(st_table_entry*));
+    tbl->bins = size ? st_alloc_bins(size) : NULL;
     tbl->head = 0;
     tbl->tail = 0;
 
@@ -248,12 +345,12 @@ st_clear(st_table *table)
         return;
     }
 
-    for(i = 0; i < table->num_bins; i++) {
+    for (i = 0; i < table->num_bins; i++) {
 	ptr = table->bins[i];
 	table->bins[i] = 0;
 	while (ptr != 0) {
 	    next = ptr->next;
-	    free(ptr);
+	    st_free_entry(ptr);
 	    ptr = next;
 	}
     }
@@ -266,8 +363,9 @@ void
 st_free_table(st_table *table)
 {
     st_clear(table);
-    free(table->bins);
-    free(table);
+    if (table->num_bins)
+	st_free_bins(table->bins, table->num_bins);
+    st_dealloc_table(table);
 }
 
 size_t
@@ -306,46 +404,77 @@ count_collision(const struct st_hash_type *type)
 #define FOUND_ENTRY
 #endif
 
-#define FIND_ENTRY(table, ptr, hash_val, bin_pos) do {\
-    (bin_pos) = (hash_val)%(table)->num_bins;\
-    (ptr) = (table)->bins[(bin_pos)];\
-    FOUND_ENTRY;\
-    if (PTR_NOT_EQUAL((table), (ptr), (hash_val), key)) {\
-	COLLISION;\
-	while (PTR_NOT_EQUAL((table), (ptr)->next, (hash_val), key)) {\
-	    (ptr) = (ptr)->next;\
-	}\
-	(ptr) = (ptr)->next;\
-    }\
-} while (0)
+#define FIND_ENTRY(table, ptr, hash_val, bin_pos) \
+    ((ptr) = find_entry((table), key, (hash_val), ((bin_pos) = (hash_val)%(table)->num_bins)))
+
+static st_table_entry *
+find_entry(st_table *table, st_data_t key, st_index_t hash_val, st_index_t bin_pos)
+{
+    register st_table_entry *ptr = table->bins[bin_pos];
+    FOUND_ENTRY;
+    if (PTR_NOT_EQUAL(table, ptr, hash_val, key)) {
+	COLLISION;
+	while (PTR_NOT_EQUAL(table, ptr->next, hash_val, key)) {
+	    ptr = ptr->next;
+	}
+	ptr = ptr->next;
+    }
+    return ptr;
+}
+
+static inline st_index_t
+find_packed_index(st_table *table, st_index_t hash_val, st_data_t key)
+{
+    st_index_t i = 0;
+    for(;;i++) {
+        while (i < table->num_entries && PHASH(table, i) != hash_val) i++;
+        if (i == table->num_entries || EQUAL(table, key, PKEY(table, i)))
+            break;
+    }
+    return i;
+}
+
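
The nested loop in find_packed_index() above is compact but easy to misread: the inner while skips entries by comparing only the cached hash, and the key itself is compared with EQUAL() only when the hashes match, with the scan resuming after a collision with a different key. Below is an equivalent, more explicit formulation of the same scan; it relies on the PHASH/PKEY/EQUAL macros defined earlier in this file and is only a restatement for clarity, not code from the patch.

    /* Equivalent to find_packed_index(): returns num_entries when not found. */
    static inline st_index_t
    find_packed_index_explicit(st_table *table, st_index_t hash_val, st_data_t key)
    {
        st_index_t i;
        for (i = 0; i < table->num_entries; i++) {
            if (PHASH(table, i) != hash_val) continue;    /* cheap filter first */
            if (EQUAL(table, key, PKEY(table, i))) break; /* real key comparison */
        }
        return i;    /* == table->num_entries when the key is absent */
    }
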
+static inline int
+check_ultra_packed(st_table *table, st_index_t hash_val, st_data_t key)
+{
+    return table->num_entries && UPHASH(table) == hash_val &&
+	    EQUAL(table, key, UPKEY(table));
+}
 
 #define collision_check 0
 
 int
 st_lookup(st_table *table, register st_data_t key, st_data_t *value)
 {
-    st_index_t hash_val, bin_pos;
+    st_index_t hash_val;
     register st_table_entry *ptr;
 
+    hash_val = do_hash(key, table);
+
+    if (ULTRA_PACKED(table)) {
+	if (check_ultra_packed(table, hash_val, key)) {
+	    if (value != 0) *value = UPVAL(table);
+	    return 1;
+	}
+	return 0;
+    }
+
     if (table->entries_packed) {
-        st_index_t i;
-        for (i = 0; i < table->num_entries; i++) {
-            if ((st_data_t)table->bins[i*2] == key) {
-                if (value !=0) *value = (st_data_t)table->bins[i*2+1];
-                return 1;
-            }
-        }
+        st_index_t i = find_packed_index(table, hash_val, key);
+	if (i < table->num_entries) {
+	    if (value != 0) *value = PVAL(table, i);
+	    return 1;
+	}
         return 0;
     }
 
-    hash_val = do_hash(key, table);
-    FIND_ENTRY(table, ptr, hash_val, bin_pos);
+    ptr = find_entry(table, key, hash_val, hash_val % table->num_bins);
 
     if (ptr == 0) {
 	return 0;
     }
     else {
-	if (value != 0)  *value = ptr->record;
+	if (value != 0) *value = ptr->record;
 	return 1;
     }
 }
@@ -353,22 +482,29 @@ st_lookup(st_table *table, register st_data_t key, st_data_t *value)
 int
 st_get_key(st_table *table, register st_data_t key, st_data_t *result)
 {
-    st_index_t hash_val, bin_pos;
+    st_index_t hash_val;
     register st_table_entry *ptr;
 
+    hash_val = do_hash(key, table);
+
+    if (ULTRA_PACKED(table)) {
+	if (check_ultra_packed(table, hash_val, key)) {
+	    if (result != 0) *result = UPKEY(table);
+	    return 1;
+	}
+	return 0;
+    }
+
     if (table->entries_packed) {
-        st_index_t i;
-        for (i = 0; i < table->num_entries; i++) {
-            if ((st_data_t)table->bins[i*2] == key) {
-                if (result !=0) *result = (st_data_t)table->bins[i*2];
-                return 1;
-            }
-        }
+        st_index_t i = find_packed_index(table, hash_val, key);
+	if (i < table->num_entries) {
+	    if (result != 0) *result = PKEY(table, i);
+	    return 1;
+	}
         return 0;
     }
 
-    hash_val = do_hash(key, table);
-    FIND_ENTRY(table, ptr, hash_val, bin_pos);
+    ptr = find_entry(table, key, hash_val, hash_val % table->num_bins);
 
     if (ptr == 0) {
 	return 0;
@@ -382,85 +518,153 @@ st_get_key(st_table *table, register st_data_t key, st_data_t *result)
 #undef collision_check
 #define collision_check 1
 
-#define MORE_PACKABLE_P(table) \
-    ((st_index_t)((table)->num_entries+1) * 2 <= (table)->num_bins && \
-     (table)->num_entries+1 <= MAX_PACKED_NUMHASH)
-
-#define ADD_DIRECT(table, key, value, hash_val, bin_pos)\
-do {\
-    st_table_entry *entry;\
-    if ((table)->num_entries > ST_DEFAULT_MAX_DENSITY * (table)->num_bins) {\
-	rehash(table);\
-        (bin_pos) = (hash_val) % (table)->num_bins;\
-    }\
-    \
-    entry = alloc(st_table_entry);\
-    \
-    entry->hash = (hash_val);\
-    entry->key = (key);\
-    entry->record = (value);\
-    entry->next = (table)->bins[(bin_pos)];\
-    if ((table)->head != 0) {\
-	entry->fore = 0;\
-	(entry->back = (table)->tail)->fore = entry;\
-	(table)->tail = entry;\
-    }\
-    else {\
-	(table)->head = (table)->tail = entry;\
-	entry->fore = entry->back = 0;\
-    }\
-    (table)->bins[(bin_pos)] = entry;\
-    (table)->num_entries++;\
-} while (0)
+static inline st_table_entry *
+new_entry(st_table * table, st_data_t key, st_data_t value,
+	st_index_t hash_val, register st_index_t bin_pos)
+{
+    register st_table_entry *entry = st_alloc_entry();
+
+    entry->next = table->bins[bin_pos];
+    table->bins[bin_pos] = entry;
+    entry->hash = hash_val;
+    entry->key = key;
+    entry->record = value;
+
+    return entry;
+}
+
+static inline void
+add_direct(st_table *table, st_data_t key, st_data_t value,
+	   st_index_t hash_val, register st_index_t bin_pos)
+{
+    register st_table_entry *entry;
+    if (table->num_entries > ST_DEFAULT_MAX_DENSITY * table->num_bins) {
+	rehash(table);
+        bin_pos = hash_val % table->num_bins;
+    }
+
+    entry = new_entry(table, key, value, hash_val, bin_pos);
+
+    if (table->head != 0) {
+	entry->fore = 0;
+	(entry->back = table->tail)->fore = entry;
+	table->tail = entry;
+    }
+    else {
+	table->head = table->tail = entry;
+	entry->fore = entry->back = 0;
+    }
+    table->num_entries++;
+}
 
 static void
 unpack_entries(register st_table *table)
 {
-    st_index_t i;
-    struct st_table_entry *packed_bins[MAX_PACKED_NUMHASH*2];
+    st_index_t i = 0;
+    st_packed_bins packed_bins;
+    register st_table_entry *entry;
     st_table tmp_table = *table;
 
-    memcpy(packed_bins, table->bins, sizeof(struct st_table_entry *) * table->num_entries*2);
-    table->bins = packed_bins;
+    packed_bins = PACKED_BINS(table);
+    table->as.packed = &packed_bins;
     tmp_table.entries_packed = 0;
-    tmp_table.num_entries = 0;
-    memset(tmp_table.bins, 0, sizeof(struct st_table_entry *) * tmp_table.num_bins);
-    for (i = 0; i < table->num_entries; i++) {
-        st_insert(&tmp_table, (st_data_t)packed_bins[i*2], (st_data_t)packed_bins[i*2+1]);
+    tmp_table.num_entries = MAX_PACKED_HASH;
+#if ST_DEFAULT_INIT_TABLE_SIZE == ST_DEFAULT_PACKED_TABLE_SIZE
+    MEMZERO(tmp_table.bins, st_table_entry*, tmp_table.num_bins);
+#else
+    st_free_bins(tmp_table.bins, tmp_table.num_bins);
+    tmp_table.num_bins = ST_DEFAULT_INIT_TABLE_SIZE;
+    tmp_table.bins = st_alloc_bins(ST_DEFAULT_INIT_TABLE_SIZE);
+#endif
+#define ikey packed_bins.kv[i].key
+#define ival packed_bins.kv[i].val
+#define ihash packed_bins.kv[i].hash
+    entry = new_entry(&tmp_table, ikey, ival, ihash,
+                      ihash % ST_DEFAULT_INIT_TABLE_SIZE);
+    tmp_table.head = entry;
+    entry->back = NULL;
+    for (i = 1; i < MAX_PACKED_HASH; i++) {
+        register st_table_entry *oldentry = entry;
+        entry = new_entry(&tmp_table, ikey, ival, ihash,
+                          ihash % ST_DEFAULT_INIT_TABLE_SIZE);
+        oldentry->fore = entry;
+        entry->back = oldentry;
     }
+    entry->fore = NULL;
+    tmp_table.tail = entry;
     *table = tmp_table;
 }
 
+static void
+add_packed_direct(st_table *table, st_data_t key, st_data_t value, st_index_t hash_val)
+{
+    if (table->num_entries < MAX_PACKED_HASH) {
+	st_index_t i = table->num_entries++;
+	PKEY_SET(table, i, key);
+	PVAL_SET(table, i, value);
+	PHASH_SET(table, i, hash_val);
+    }
+    else {
+	unpack_entries(table);
+	add_direct(table, key, value, hash_val, hash_val % table->num_bins);
+    }
+}
+
+static void
+add_upacked_direct(st_table *table, st_data_t key, st_data_t value, st_index_t hash_val)
+{
+    if (table->num_entries) {
+        st_packed_entry tmp = table->as.upacked;
+        table->bins = st_alloc_bins(ST_DEFAULT_PACKED_TABLE_SIZE);
+        table->num_bins = ST_DEFAULT_PACKED_TABLE_SIZE;
+        PACKED_ENT(table, 0) = tmp;
+        PHASH_SET(table, 1, hash_val);
+        PKEY_SET(table, 1, key);
+        PVAL_SET(table, 1, value);
+        table->num_entries = 2;
+        table->head = NULL;
+        table->tail = NULL;
+    }
+    else {
+        UPHASH_SET(table, hash_val);
+        UPKEY_SET(table, key);
+        UPVAL_SET(table, value);
+        table->num_entries = 1;
+    }
+}
+
 int
 st_insert(register st_table *table, register st_data_t key, st_data_t value)
 {
-    st_index_t hash_val, bin_pos;
+    st_index_t hash_val;
+    register st_index_t bin_pos;
     register st_table_entry *ptr;
 
+    hash_val = do_hash(key, table);
+
+    if (ULTRA_PACKED(table)) {
+	if (check_ultra_packed(table, hash_val, key)) {
+	    UPVAL_SET(table, value);
+	    return 1;
+	}
+	add_upacked_direct(table, key, value, hash_val);
+	return 0;
+    }
+
     if (table->entries_packed) {
-        st_index_t i;
-        for (i = 0; i < table->num_entries; i++) {
-            if ((st_data_t)table->bins[i*2] == key) {
-                table->bins[i*2+1] = (struct st_table_entry*)value;
-                return 1;
-            }
-        }
-        if (MORE_PACKABLE_P(table)) {
-            i = table->num_entries++;
-            table->bins[i*2] = (struct st_table_entry*)key;
-            table->bins[i*2+1] = (struct st_table_entry*)value;
-            return 0;
-        }
-        else {
-            unpack_entries(table);
+        st_index_t i = find_packed_index(table, hash_val, key);
+	if (i < table->num_entries) {
+	    PVAL_SET(table, i, value);
+	    return 1;
         }
+	add_packed_direct(table, key, value, hash_val);
+	return 0;
     }
 
-    hash_val = do_hash(key, table);
     FIND_ENTRY(table, ptr, hash_val, bin_pos);
 
     if (ptr == 0) {
-	ADD_DIRECT(table, key, value, hash_val, bin_pos);
+	add_direct(table, key, value, hash_val, bin_pos);
 	return 0;
     }
     else {
@@ -473,34 +677,38 @@ int
 st_insert2(register st_table *table, register st_data_t key, st_data_t value,
 	   st_data_t (*func)(st_data_t))
 {
-    st_index_t hash_val, bin_pos;
+    st_index_t hash_val;
+    register st_index_t bin_pos;
     register st_table_entry *ptr;
 
+    hash_val = do_hash(key, table);
+
+    if (ULTRA_PACKED(table)) {
+	if (check_ultra_packed(table, hash_val, key)) {
+	    UPVAL_SET(table, value);
+	    return 1;
+	}
+	key = (*func)(key);
+	add_upacked_direct(table, key, value, hash_val);
+	return 0;
+    }
+
     if (table->entries_packed) {
-        st_index_t i;
-        for (i = 0; i < table->num_entries; i++) {
-            if ((st_data_t)table->bins[i*2] == key) {
-                table->bins[i*2+1] = (struct st_table_entry*)value;
-                return 1;
-            }
-        }
-        if (MORE_PACKABLE_P(table)) {
-            i = table->num_entries++;
-            table->bins[i*2] = (struct st_table_entry*)key;
-            table->bins[i*2+1] = (struct st_table_entry*)value;
-            return 0;
-        }
-        else {
-            unpack_entries(table);
+        st_index_t i = find_packed_index(table, hash_val, key);
+	if (i < table->num_entries) {
+	    PVAL_SET(table, i, value);
+	    return 1;
         }
+	key = (*func)(key);
+	add_packed_direct(table, key, value, hash_val);
+	return 0;
     }
 
-    hash_val = do_hash(key, table);
     FIND_ENTRY(table, ptr, hash_val, bin_pos);
 
     if (ptr == 0) {
 	key = (*func)(key);
-	ADD_DIRECT(table, key, value, hash_val, bin_pos);
+	add_direct(table, key, value, hash_val, bin_pos);
 	return 0;
     }
     else {
@@ -512,36 +720,31 @@ st_insert2(register st_table *table, register st_data_t key, st_data_t value,
 void
 st_add_direct(st_table *table, st_data_t key, st_data_t value)
 {
-    st_index_t hash_val, bin_pos;
+    st_index_t hash_val;
+
+    hash_val = do_hash(key, table);
+
+    if (ULTRA_PACKED(table)) {
+	add_upacked_direct(table, key, value, hash_val);
+	return;
+    }
 
     if (table->entries_packed) {
-        int i;
-        if (MORE_PACKABLE_P(table)) {
-            i = table->num_entries++;
-            table->bins[i*2] = (struct st_table_entry*)key;
-            table->bins[i*2+1] = (struct st_table_entry*)value;
-            return;
-        }
-        else {
-            unpack_entries(table);
-        }
+	add_packed_direct(table, key, value, hash_val);
+	return;
     }
 
-    hash_val = do_hash(key, table);
-    bin_pos = hash_val % table->num_bins;
-    ADD_DIRECT(table, key, value, hash_val, bin_pos);
+    add_direct(table, key, value, hash_val, hash_val % table->num_bins);
 }
 
 static void
 rehash(register st_table *table)
 {
     register st_table_entry *ptr, **new_bins;
-    st_index_t i, new_num_bins, hash_val;
+    st_index_t new_num_bins, hash_val;
 
     new_num_bins = new_size(table->num_bins+1);
-    new_bins = (st_table_entry**)
-	xrealloc(table->bins, new_num_bins * sizeof(st_table_entry*));
-    for (i = 0; i < new_num_bins; ++i) new_bins[i] = 0;
+    new_bins = st_realloc_bins(table->bins, new_num_bins, table->num_bins);
     table->num_bins = new_num_bins;
     table->bins = new_bins;
 
@@ -558,34 +761,38 @@ st_table*
 st_copy(st_table *old_table)
 {
     st_table *new_table;
-    st_table_entry *ptr, *entry, *prev, **tail;
+    st_table_entry *ptr, *entry, *prev, **tailp;
     st_index_t num_bins = old_table->num_bins;
     st_index_t hash_val;
 
-    new_table = alloc(st_table);
+    new_table = st_alloc_table();
     if (new_table == 0) {
 	return 0;
     }
 
     *new_table = *old_table;
-    new_table->bins = (st_table_entry**)
-	Calloc((unsigned)num_bins, sizeof(st_table_entry*));
+
+    if (ULTRA_PACKED(old_table)) {
+	return new_table;
+    }
+
+    new_table->bins = st_alloc_bins(num_bins);
 
     if (new_table->bins == 0) {
-	free(new_table);
+	st_dealloc_table(new_table);
 	return 0;
     }
 
     if (old_table->entries_packed) {
-        memcpy(new_table->bins, old_table->bins, sizeof(struct st_table_entry *) * old_table->num_bins);
+        MEMCPY(new_table->bins, old_table->bins, st_table_entry*, old_table->num_bins);
         return new_table;
     }
 
     if ((ptr = old_table->head) != 0) {
 	prev = 0;
-	tail = &new_table->head;
+	tailp = &new_table->head;
 	do {
-	    entry = alloc(st_table_entry);
+	    entry = st_alloc_entry();
 	    if (entry == 0) {
 		st_free_table(new_table);
 		return 0;
@@ -595,8 +802,8 @@ st_copy(st_table *old_table)
 	    entry->next = new_table->bins[hash_val];
 	    new_table->bins[hash_val] = entry;
 	    entry->back = prev;
-	    *tail = prev = entry;
-	    tail = &entry->fore;
+	    *tailp = prev = entry;
+	    tailp = &entry->fore;
 	} while ((ptr = ptr->fore) != 0);
 	new_table->tail = prev;
     }
@@ -604,21 +811,22 @@ st_copy(st_table *old_table)
     return new_table;
 }
 
-#define REMOVE_ENTRY(table, ptr) do					\
-    {									\
-	if ((ptr)->fore == 0 && (ptr)->back == 0) {			\
-	    (table)->head = 0;						\
-	    (table)->tail = 0;						\
-	}								\
-	else {								\
-	    st_table_entry *fore = (ptr)->fore, *back = (ptr)->back;	\
-	    if (fore) fore->back = back;				\
-	    if (back) back->fore = fore;				\
-	    if ((ptr) == (table)->head) (table)->head = fore;		\
-	    if ((ptr) == (table)->tail) (table)->tail = back;		\
-	}								\
-	(table)->num_entries--;						\
-    } while (0)
+static inline void
+remove_entry(st_table *table, st_table_entry *ptr)
+{
+    if (ptr->fore == 0 && ptr->back == 0) {
+	table->head = 0;
+	table->tail = 0;
+    }
+    else {
+	st_table_entry *fore = ptr->fore, *back = ptr->back;
+	if (fore) fore->back = back;
+	if (back) back->fore = fore;
+	if (ptr == table->head) table->head = fore;
+	if (ptr == table->tail) table->tail = back;
+    }
+    table->num_entries--;
+}
 
 int
 st_delete(register st_table *table, register st_data_t *key, st_data_t *value)
@@ -627,34 +835,42 @@ st_delete(register st_table *table, register st_data_t *key, st_data_t *value)
     st_table_entry **prev;
     register st_table_entry *ptr;
 
+    hash_val = do_hash(*key, table);
+
+    if (ULTRA_PACKED(table)) {
+	if (check_ultra_packed(table, hash_val, *key)) {
+	    if (value != 0) *value = UPVAL(table);
+	    *key = UPKEY(table);
+	    table->num_entries = 0;
+	    return 1;
+	}
+	goto notfound;
+    }
+
     if (table->entries_packed) {
-        st_index_t i;
-        for (i = 0; i < table->num_entries; i++) {
-            if ((st_data_t)table->bins[i*2] == *key) {
-                if (value != 0) *value = (st_data_t)table->bins[i*2+1];
-                table->num_entries--;
-                memmove(&table->bins[i*2], &table->bins[(i+1)*2],
-                        sizeof(struct st_table_entry*) * 2*(table->num_entries-i));
-                return 1;
-            }
+        st_index_t i = find_packed_index(table, hash_val, *key);
+	if (i < table->num_entries) {
+	    if (value != 0) *value = PVAL(table, i);
+	    *key = PKEY(table, i);
+	    remove_packed_entry(table, i);
+	    return 1;
         }
-        if (value != 0) *value = 0;
-        return 0;
+	goto notfound;
     }
 
-    hash_val = do_hash_bin(*key, table);
-
-    for (prev = &table->bins[hash_val]; (ptr = *prev) != 0; prev = &ptr->next) {
+    prev = &table->bins[hash_val % table->num_bins];
+    for (; (ptr = *prev) != 0; prev = &ptr->next) {
 	if (EQUAL(table, *key, ptr->key)) {
 	    *prev = ptr->next;
-	    REMOVE_ENTRY(table, ptr);
+	    remove_entry(table, ptr);
 	    if (value != 0) *value = ptr->record;
 	    *key = ptr->key;
-	    free(ptr);
+	    st_free_entry(ptr);
 	    return 1;
 	}
     }
 
+notfound:
     if (value != 0) *value = 0;
     return 0;
 }
@@ -665,25 +881,36 @@ st_delete_safe(register st_table *table, register st_data_t *key, st_data_t *val
     st_index_t hash_val;
     register st_table_entry *ptr;
 
+    hash_val = do_hash(*key, table);
+
+    if (ULTRA_PACKED(table)) {
+	if (check_ultra_packed(table, hash_val, *key)) {
+	    if (value != 0) *value = UPVAL(table);
+	    *key = UPKEY(table);
+	    UPKEY_SET(table, never);
+	    UPHASH_SET(table, 0);
+	    return 1;
+	}
+	goto notfound;
+    }
+
     if (table->entries_packed) {
-	st_index_t i;
-	for (i = 0; i < table->num_entries; i++) {
-	    if ((st_data_t)table->bins[i*2] == *key) {
-		if (value != 0) *value = (st_data_t)table->bins[i*2+1];
-		table->bins[i*2] = (void *)never;
-		return 1;
-	    }
+        st_index_t i = find_packed_index(table, hash_val, *key);
+	if (i < table->num_entries) {
+	    if (value != 0) *value = PVAL(table, i);
+	    *key = PKEY(table, i);
+	    PKEY_SET(table, i, never);
+	    PHASH_SET(table, i,  0);
+	    return 1;
 	}
-	if (value != 0) *value = 0;
-	return 0;
+	goto notfound;
     }
 
-    hash_val = do_hash_bin(*key, table);
-    ptr = table->bins[hash_val];
+    ptr = table->bins[hash_val % table->num_bins];
 
     for (; ptr != 0; ptr = ptr->next) {
 	if ((ptr->key != never) && EQUAL(table, ptr->key, *key)) {
-	    REMOVE_ENTRY(table, ptr);
+	    remove_entry(table, ptr);
 	    *key = ptr->key;
 	    if (value != 0) *value = ptr->record;
 	    ptr->key = ptr->record = never;
@@ -691,6 +918,7 @@ st_delete_safe(register st_table *table, register st_data_t *key, st_data_t *val
 	}
     }
 
+notfound:
     if (value != 0) *value = 0;
     return 0;
 }
@@ -701,15 +929,21 @@ st_cleanup_safe(st_table *table, st_data_t never)
     st_table_entry *ptr, **last, *tmp;
     st_index_t i;
 
+    if (ULTRA_PACKED(table)) {
+	if (UPKEY(table) == never) {
+	    table->num_entries = 0;
+	}
+	return;
+    }
+
     if (table->entries_packed) {
 	st_index_t i = 0, j = 0;
-	while ((st_data_t)table->bins[i*2] != never) {
+	while (PKEY(table, i) != never) {
 	    if (i++ == table->num_entries) return;
 	}
 	for (j = i; ++i < table->num_entries;) {
-	    if ((st_data_t)table->bins[i*2] == never) continue;
-	    table->bins[j*2] = table->bins[i*2];
-	    table->bins[j*2+1] = table->bins[i*2+1];
+	    if (PKEY(table, i) == never) continue;
+	    PACKED_ENT(table, j) = PACKED_ENT(table, i);
 	    j++;
 	}
 	table->num_entries = j;
@@ -722,7 +956,7 @@ st_cleanup_safe(st_table *table, st_data_t never)
 	    if (ptr->key == never) {
 		tmp = ptr;
 		*last = ptr = ptr->next;
-		free(tmp);
+		st_free_entry(tmp);
 	    }
 	    else {
 		ptr = *(last = &ptr->next);
@@ -732,21 +966,70 @@ st_cleanup_safe(st_table *table, st_data_t never)
 }
 
 int
+st_foreach_nocheck(st_table *table, int (*func)(ANYARGS), st_data_t arg)
+{
+    enum st_retval retval;
+    if (table->num_entries == 0) return 0;
+    if (ULTRA_PACKED(table)) {
+        (*func)(UPKEY(table), UPVAL(table), arg);
+    }
+    else if (table->entries_packed) {
+        register st_index_t i;
+        for(i = 0; i < table->num_entries; i++) {
+            retval = (*func)(PKEY(table, i), PVAL(table, i), arg);
+            if (retval == ST_STOP) break;
+        }
+    }
+    else {
+        st_table_entry *ptr;
+        for(ptr = table->head; ptr; ptr = ptr->fore) {
+            retval = (*func)(ptr->key, ptr->record, arg);
+            if (retval == ST_STOP) break;
+        }
+    }
+    return 0;
+}
+
+int
 st_foreach(st_table *table, int (*func)(ANYARGS), st_data_t arg)
 {
     st_table_entry *ptr, **last, *tmp;
     enum st_retval retval;
-    st_index_t i;
+    st_index_t i = 0;
 
     if (table->entries_packed) {
+        st_packed_entry packed;
+        if (ULTRA_PACKED(table) && table->num_entries) {
+            packed = table->as.upacked;
+            retval = (*func)(packed.key, packed.val, arg);
+            if (!ULTRA_PACKED(table)) goto packed;
+            switch(retval) {
+              case ST_CHECK:
+                if (UPKEY(table) == Qundef && UPHASH(table) == 0)
+                    break;
+                if (table->num_entries &&
+                        UPHASH(table) == packed.hash &&
+                        EQUAL(table, packed.key, UPKEY(table)))
+                    break;
+                retval = (*func)(0, 0, arg, 1);
+                return 1;
+              case ST_CONTINUE:
+                break;
+              case ST_STOP:
+                return 0;
+              case ST_DELETE:
+                table->num_entries = 0;
+            }
+            return 0;
+        }
+
         for (i = 0; i < table->num_entries; i++) {
-            st_index_t j;
-            st_data_t key, val;
-            key = (st_data_t)table->bins[i*2];
-            val = (st_data_t)table->bins[i*2+1];
-            retval = (*func)(key, val, arg);
+            packed = PACKED_ENT(table, i);
+            retval = (*func)(packed.key, packed.val, arg);
+          packed:
 	    if (!table->entries_packed) {
-		FIND_ENTRY(table, ptr, key, i);
+		st_index_t key = packed.key;
+		FIND_ENTRY(table, ptr, packed.hash, i);
 		if (retval == ST_CHECK) {
 		    if (!ptr) goto deleted;
 		    goto unpacked_continue;
@@ -755,11 +1038,15 @@ st_foreach(st_table *table, int (*func)(ANYARGS), st_data_t arg)
 	    }
             switch (retval) {
 	      case ST_CHECK:	/* check if hash is modified during iteration */
-                for (j = 0; j < table->num_entries; j++) {
-                    if ((st_data_t)table->bins[j*2] == key)
-                        break;
-                }
-                if (j == table->num_entries) {
+                /* work around unconforming behaviour of hash */
+                if (PKEY(table, i) == Qundef && PHASH(table, i) == 0)
+                    break;
+                else if (i < table->num_entries &&
+                        PHASH(table, i) == packed.hash &&
+                        EQUAL(table, packed.key, PKEY(table, i)))
+                    break;
+                i = find_packed_index(table, packed.hash, packed.key);
+                if (i == table->num_entries) {
 		    goto deleted;
                 }
 		/* fall through */
@@ -768,9 +1055,7 @@ st_foreach(st_table *table, int (*func)(ANYARGS), st_data_t arg)
 	      case ST_STOP:
 		return 0;
 	      case ST_DELETE:
-                table->num_entries--;
-                memmove(&table->bins[i*2], &table->bins[(i+1)*2],
-                        sizeof(struct st_table_entry*) * 2*(table->num_entries-i));
+		remove_packed_entry(table, i);
                 i--;
                 break;
             }
@@ -809,8 +1094,8 @@ st_foreach(st_table *table, int (*func)(ANYARGS), st_data_t arg)
 		    if (ptr == tmp) {
 			tmp = ptr->fore;
 			*last = ptr->next;
-			REMOVE_ENTRY(table, ptr);
-			free(ptr);
+			remove_entry(table, ptr);
+			st_free_entry(ptr);
 			if (ptr == tmp) return 0;
 			ptr = tmp;
 			break;
@@ -834,13 +1119,13 @@ st_reverse_foreach(st_table *table, int (*func)(ANYARGS), st_data_t arg)
         for (i = table->num_entries-1; 0 <= i; i--) {
             int j;
             st_data_t key, val;
-            key = (st_data_t)table->bins[i*2];
-            val = (st_data_t)table->bins[i*2+1];
+            key = PKEY(table, i);
+            val = PVAL(table, i);
             retval = (*func)(key, val, arg);
             switch (retval) {
 	      case ST_CHECK:	/* check if hash is modified during iteration */
                 for (j = 0; j < table->num_entries; j++) {
-                    if ((st_data_t)table->bins[j*2] == key)
+                    if (PKEY(table, j) == key)
                         break;
                 }
                 if (j == table->num_entries) {
@@ -854,9 +1139,7 @@ st_reverse_foreach(st_table *table, int (*func)(ANYARGS), st_data_t arg)
 	      case ST_STOP:
 		return 0;
 	      case ST_DELETE:
-                table->num_entries--;
-                memmove(&table->bins[i*2], &table->bins[(i+1)*2],
-                        sizeof(struct st_table_entry*) * 2*(table->num_entries-i));
+		remove_packed_entry(table, i);
                 break;
             }
         }
@@ -889,8 +1172,8 @@ st_reverse_foreach(st_table *table, int (*func)(ANYARGS), st_data_t arg)
 		    if (ptr == tmp) {
 			tmp = ptr->back;
 			*last = ptr->next;
-			REMOVE_ENTRY(table, ptr);
-			free(ptr);
+			remove_entry(table, ptr);
+			st_free_entry(ptr);
 			ptr = tmp;
 			break;
 		    }
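[Editor's note, not part of the patch] The st.c hunks above replace the old
bins[i*2]/bins[i*2+1] layout with packed entry slots that also cache each
key's hash (PKEY/PVAL/PHASH, find_packed_index), so small-table scans can
reject non-matching entries on the cached hash before running the full key
comparison. The stand-alone sketch below illustrates that idea only; the
packed_entry type and the toy_hash/find_packed names are illustrative and do
not appear in the patch.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative packed entry: key, value and cached hash side by side. */
    typedef struct {
        const char *key;
        int         val;
        uint32_t    hash;
    } packed_entry;

    static uint32_t toy_hash(const char *s)
    {
        uint32_t h = 2166136261u;               /* FNV-1a, demo only */
        while (*s) h = (h ^ (uint8_t)*s++) * 16777619u;
        return h;
    }

    /* Scan the packed array: compare cached hashes first, full keys second. */
    static size_t find_packed(const packed_entry *ents, size_t n, const char *key)
    {
        uint32_t h = toy_hash(key);
        size_t i;
        for (i = 0; i < n; i++) {
            if (ents[i].hash == h && strcmp(ents[i].key, key) == 0) return i;
        }
        return n;                               /* "not found", like num_entries */
    }

    int main(void)
    {
        packed_entry ents[] = { {"alpha", 1, 0}, {"beta", 2, 0}, {"gamma", 3, 0} };
        size_t n = sizeof(ents) / sizeof(ents[0]), i;
        for (i = 0; i < n; i++) ents[i].hash = toy_hash(ents[i].key);
        i = find_packed(ents, n, "beta");
        if (i < n) printf("%s => %d\n", ents[i].key, ents[i].val);
        return 0;
    }
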
diff --git a/thread.c b/thread.c
index 342d4fe..2387edf 100644
--- a/thread.c
+++ b/thread.c
@@ -2197,7 +2197,7 @@ rb_thread_keys(VALUE self)
     GetThreadPtr(self, th);
 
     if (th->local_storage) {
-	st_foreach(th->local_storage, thread_keys_i, ary);
+	st_foreach_nocheck(th->local_storage, thread_keys_i, ary);
     }
     return ary;
 }
@@ -3068,7 +3068,7 @@ clear_coverage(void)
 {
     VALUE coverages = rb_get_coverages();
     if (RTEST(coverages)) {
-	st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
+	st_foreach_nocheck(RHASH_TBL(coverages), clear_coverage_i, 0);
     }
 }
 
@@ -3213,7 +3213,7 @@ thgroup_list(VALUE group)
 
     param.ary = ary;
     param.group = group;
-    st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
+    st_foreach_nocheck(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
     return ary;
 }
 
@@ -4119,7 +4119,7 @@ set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
 static void
 set_threads_event_flags(int flag)
 {
-    st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
+    st_foreach_nocheck(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
 }
 
 static inline int
@@ -4299,7 +4299,7 @@ static rb_thread_t *
 vm_event_hooks_running_thread(rb_vm_t *vm)
 {
     rb_thread_t *found = NULL;
-    st_foreach(vm->living_threads, running_vm_event_hooks, (st_data_t)&found);
+    st_foreach_nocheck(vm->living_threads, running_vm_event_hooks, (st_data_t)&found);
     return found;
 }
 
diff --git a/transcode.c b/transcode.c
index 4c3a273..db31c19 100644
--- a/transcode.c
+++ b/transcode.c
@@ -319,7 +319,7 @@ transcode_search_path(const char *sname, const char *dname,
         }
 
         bfs.base_enc = q->enc;
-        st_foreach(table2, transcode_search_path_i, (st_data_t)&bfs);
+        st_foreach_nocheck(table2, transcode_search_path_i, (st_data_t)&bfs);
         bfs.base_enc = NULL;
 
         xfree(q);
diff --git a/variable.c b/variable.c
index caadf71..a2751d8 100644
--- a/variable.c
+++ b/variable.c
@@ -98,7 +98,7 @@ fc_i(ID key, rb_const_entry_t *ce, struct fc_result *res)
 	    arg.klass = res->klass;
 	    arg.track = value;
 	    arg.prev = res;
-	    st_foreach(RCLASS_CONST_TBL(value), fc_i, (st_data_t)&arg);
+	    st_foreach_nocheck(RCLASS_CONST_TBL(value), fc_i, (st_data_t)&arg);
 	    if (arg.path) {
 		res->path = arg.path;
 		return ST_STOP;
@@ -123,10 +123,10 @@ find_class_path(VALUE klass)
     arg.track = rb_cObject;
     arg.prev = 0;
     if (RCLASS_CONST_TBL(rb_cObject)) {
-	st_foreach_safe(RCLASS_CONST_TBL(rb_cObject), fc_i, (st_data_t)&arg);
+	st_foreach_nocheck(RCLASS_CONST_TBL(rb_cObject), fc_i, (st_data_t)&arg);
     }
     if (arg.path == 0) {
-	st_foreach_safe(rb_class_tbl, fc_i, (st_data_t)&arg);
+	st_foreach_nocheck(rb_class_tbl, fc_i, (st_data_t)&arg);
     }
     if (arg.path) {
 	st_data_t tmp = tmp_classpath;
@@ -473,7 +473,7 @@ void
 rb_gc_mark_global_tbl(void)
 {
     if (rb_global_tbl)
-        st_foreach_safe(rb_global_tbl, mark_global_entry, 0);
+        st_foreach_nocheck(rb_global_tbl, mark_global_entry, 0);
 }
 
 static ID
@@ -765,7 +765,7 @@ rb_f_global_variables(void)
     char buf[2];
     int i;
 
-    st_foreach_safe(rb_global_tbl, gvar_i, ary);
+    st_foreach_nocheck(rb_global_tbl, gvar_i, ary);
     buf[0] = '$';
     for (i = 1; i <= 9; ++i) {
 	buf[1] = (char)(i + '0');
@@ -923,7 +923,7 @@ static int
 givar_i(VALUE obj, st_table *tbl)
 {
     if (rb_special_const_p(obj)) {
-	st_foreach_safe(tbl, givar_mark_i, 0);
+	st_foreach_nocheck(tbl, givar_mark_i, 0);
     }
     return ST_CONTINUE;
 }
@@ -933,7 +933,7 @@ rb_mark_generic_ivar_tbl(void)
 {
     if (!generic_iv_tbl) return;
     if (special_generic_ivar == 0) return;
-    st_foreach_safe(generic_iv_tbl, givar_i, 0);
+    st_foreach_nocheck(generic_iv_tbl, givar_i, 0);
 }
 
 void
@@ -1731,7 +1731,7 @@ rb_mod_const_at(VALUE mod, void *data)
 	tbl = st_init_numtable();
     }
     if (RCLASS_CONST_TBL(mod)) {
-	st_foreach_safe(RCLASS_CONST_TBL(mod), sv_i, (st_data_t)tbl);
+	st_foreach_nocheck(RCLASS_CONST_TBL(mod), sv_i, (st_data_t)tbl);
     }
     return tbl;
 }
@@ -1766,7 +1766,7 @@ rb_const_list(void *data)
 
     if (!tbl) return rb_ary_new2(0);
     ary = rb_ary_new2(tbl->num_entries);
-    st_foreach_safe(tbl, list_i, ary);
+    st_foreach_nocheck(tbl, list_i, ary);
     st_free_table(tbl);
 
     return ary;
diff --git a/vm.c b/vm.c
index e997afa..634dee8 100644
--- a/vm.c
+++ b/vm.c
@@ -1570,11 +1570,12 @@ rb_vm_mark(void *ptr)
     if (ptr) {
 	rb_vm_t *vm = ptr;
 	if (vm->living_threads) {
-	    st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
+	    st_foreach_nocheck(vm->living_threads, vm_mark_each_thread_func, 0);
 	}
 	RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
 	RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
 	RUBY_MARK_UNLESS_NULL(vm->load_path);
+	RUBY_MARK_UNLESS_NULL(vm->load_path_expanded_cache);
 	RUBY_MARK_UNLESS_NULL(vm->loaded_features);
 	RUBY_MARK_UNLESS_NULL(vm->top_self);
 	RUBY_MARK_UNLESS_NULL(vm->coverages);
@@ -2202,8 +2203,8 @@ void
 Init_BareVM(void)
 {
     /* VM bootstrap: phase 1 */
-    rb_vm_t * vm = malloc(sizeof(*vm));
-    rb_thread_t * th = malloc(sizeof(*th));
+    rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
+    rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
     if (!vm || !th) {
 	fprintf(stderr, "[FATAL] failed to allocate memory\n");
 	exit(EXIT_FAILURE);
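[Editor's note, not part of the patch] The thread.c, transcode.c, variable.c
and vm.c hunks switch callers whose callbacks only read the table over to
st_foreach_nocheck(), added in the st.c hunk above as a plain walk without the
ST_CHECK re-validation that st_foreach() performs when the table may change
under the iterator. A minimal sketch of a callback written for that convention
follows; it assumes Ruby's include/ruby/st.h is on the include path, that the
full patch declares st_foreach_nocheck there, and the count_keys_i/count_keys
names are made up for the example.

    #include "ruby/st.h"

    /* Read-only callback: never inserts into or deletes from tbl. */
    static int
    count_keys_i(st_data_t key, st_data_t val, st_data_t arg)
    {
        size_t *count = (size_t *)arg;
        (void)key; (void)val;
        (*count)++;
        return ST_CONTINUE;
    }

    static size_t
    count_keys(st_table *tbl)
    {
        size_t count = 0;
        st_foreach_nocheck(tbl, count_keys_i, (st_data_t)&count);
        return count;
    }
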
diff --git a/vm_core.h b/vm_core.h
index 7211005..e787d4b 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -298,6 +298,7 @@ typedef struct rb_vm_struct {
     /* load */
     VALUE top_self;
     VALUE load_path;
+    VALUE load_path_expanded_cache;
     VALUE loaded_features;
     struct st_table *loading_table;
 