author     Matt Valentine-House <matt@eightbitraptor.com>  2023-02-08 11:56:53 +0000
committer  Peter Zhu <peter@peterzhu.ca>                   2023-02-09 10:32:29 -0500
commit     72aba64fff09a829bfaf41165d0075066f087185 (patch)
tree       d989e27e007a38baf233dd2d0e036e0bc0b30226 /internal
parent     e2b6289bab16ff2e05e8ac7a8bc3a35bcc2c44ed (diff)
download   ruby-72aba64fff09a829bfaf41165d0075066f087185.tar.gz
Merge gc.h and internal/gc.h
[Feature #19425]
Diffstat (limited to 'internal')
-rw-r--r--  internal/gc.h | 139 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 139 insertions(+), 0 deletions(-)
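
The bulk of the moved code is the machine-stack capture machinery (SET_MACHINE_STACK_END,
RB_GC_SAVE_MACHINE_CONTEXT). Below is a minimal, self-contained sketch of the technique those
macros implement, not Ruby's implementation: spill the registers with setjmp, record the
current stack pointer, and let a conservative collector scan the word range between the
recorded end and the stack base. All names in the sketch (machine_context, set_stack_end,
SAVE_MACHINE_CONTEXT) are illustrative, not from the patch.

    /* Standalone sketch of conservative stack capture (illustrative only). */
    #include <setjmp.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        jmp_buf   regs;       /* callee-saved registers, spilled by setjmp */
        uintptr_t *stack_end; /* approximate top of the C stack */
    } machine_context;        /* hypothetical type, not from the patch */

    /* Portable fallback, same idea as the non-asm rb_gc_set_stack_end branch:
     * the address of a local variable approximates the stack pointer. */
    static void
    set_stack_end(uintptr_t **stack_end_p)
    {
        uintptr_t dummy;
        *stack_end_p = &dummy;
    }

    #define SAVE_MACHINE_CONTEXT(ctx) \
        do { \
            setjmp((ctx)->regs);              /* force registers onto the stack */ \
            set_stack_end(&(ctx)->stack_end); /* record the current stack top */ \
        } while (0)

    static void
    scan_stack(const uintptr_t *from, const uintptr_t *to)
    {
        /* A real collector would test each word against its heap; here we
         * only report the size of the range that would be scanned. */
        uintptr_t lo = (uintptr_t)from, hi = (uintptr_t)to;
        size_t words = (lo < hi ? hi - lo : lo - hi) / sizeof(uintptr_t);
        printf("would scan %zu stack words conservatively\n", words);
    }

    int
    main(void)
    {
        uintptr_t stack_base;  /* stand-in for the thread's recorded stack start */
        machine_context ctx;

        SAVE_MACHINE_CONTEXT(&ctx);
        scan_stack(ctx.stack_end, &stack_base);
        return 0;
    }

On the architectures listed in the patch the stack top is read directly with inline assembly
instead of the address-of-a-local fallback shown here.
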
diff --git a/internal/gc.h b/internal/gc.h
index 8fb219a0c4..dc55de1c03 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -15,6 +15,124 @@
#include "internal/compilers.h" /* for __has_attribute */
#include "ruby/ruby.h" /* for rb_event_flag_t */
+#if defined(__x86_64__) && !defined(_ILP32) && defined(__GNUC__)
+#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movq\t%%rsp, %0" : "=r" (*(p)))
+#elif defined(__i386) && defined(__GNUC__)
+#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movl\t%%esp, %0" : "=r" (*(p)))
+#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && !defined(_AIX) && !defined(__APPLE__) // Not Apple is NEEDED to unbreak ppc64 build on Darwin. Don't ask.
+#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr\t%0, %%r1" : "=r" (*(p)))
+#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && defined(_AIX)
+#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr %0,1" : "=r" (*(p)))
+#elif defined(__POWERPC__) && defined(__APPLE__) // Darwin ppc and ppc64
+#define SET_MACHINE_STACK_END(p) __asm__ volatile("mr %0, r1" : "=r" (*(p)))
+#elif defined(__aarch64__) && defined(__GNUC__)
+#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mov\t%0, sp" : "=r" (*(p)))
+#else
+NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
+#define SET_MACHINE_STACK_END(p) rb_gc_set_stack_end(p)
+#define USE_CONSERVATIVE_STACK_END
+#endif
+
+#define RB_GC_SAVE_MACHINE_CONTEXT(th) \
+    do { \
+        FLUSH_REGISTER_WINDOWS; \
+        setjmp((th)->ec->machine.regs); \
+        SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end); \
+    } while (0)
+
+/* for GC debug */
+
+#ifndef RUBY_MARK_FREE_DEBUG
+#define RUBY_MARK_FREE_DEBUG 0
+#endif
+
+#if RUBY_MARK_FREE_DEBUG
+extern int ruby_gc_debug_indent;
+
+static inline void
+rb_gc_debug_indent(void)
+{
+    ruby_debug_printf("%*s", ruby_gc_debug_indent, "");
+}
+
+static inline void
+rb_gc_debug_body(const char *mode, const char *msg, int st, void *ptr)
+{
+    if (st == 0) {
+        ruby_gc_debug_indent--;
+    }
+    rb_gc_debug_indent();
+    ruby_debug_printf("%s: %s %s (%p)\n", mode, st ? "->" : "<-", msg, ptr);
+
+    if (st) {
+        ruby_gc_debug_indent++;
+    }
+
+    fflush(stdout);
+}
+
+#define RUBY_MARK_ENTER(msg) rb_gc_debug_body("mark", (msg), 1, ptr)
+#define RUBY_MARK_LEAVE(msg) rb_gc_debug_body("mark", (msg), 0, ptr)
+#define RUBY_FREE_ENTER(msg) rb_gc_debug_body("free", (msg), 1, ptr)
+#define RUBY_FREE_LEAVE(msg) rb_gc_debug_body("free", (msg), 0, ptr)
+#define RUBY_GC_INFO rb_gc_debug_indent(), ruby_debug_printf
+
+#else
+#define RUBY_MARK_ENTER(msg)
+#define RUBY_MARK_LEAVE(msg)
+#define RUBY_FREE_ENTER(msg)
+#define RUBY_FREE_LEAVE(msg)
+#define RUBY_GC_INFO if(0)printf
+#endif
+
+#define RUBY_MARK_MOVABLE_UNLESS_NULL(ptr) do { \
+    VALUE markobj = (ptr); \
+    if (RTEST(markobj)) {rb_gc_mark_movable(markobj);} \
+} while (0)
+#define RUBY_MARK_UNLESS_NULL(ptr) do { \
+    VALUE markobj = (ptr); \
+    if (RTEST(markobj)) {rb_gc_mark(markobj);} \
+} while (0)
+#define RUBY_FREE_UNLESS_NULL(ptr) if(ptr){ruby_xfree(ptr);(ptr)=NULL;}
+
+#if STACK_GROW_DIRECTION > 0
+# define STACK_UPPER(x, a, b) (a)
+#elif STACK_GROW_DIRECTION < 0
+# define STACK_UPPER(x, a, b) (b)
+#else
+RUBY_EXTERN int ruby_stack_grow_direction;
+int ruby_get_stack_grow_direction(volatile VALUE *addr);
+# define stack_growup_p(x) ( \
+        (ruby_stack_grow_direction ? \
+         ruby_stack_grow_direction : \
+         ruby_get_stack_grow_direction(x)) > 0)
+# define STACK_UPPER(x, a, b) (stack_growup_p(x) ? (a) : (b))
+#endif
+
+/*
+ STACK_GROW_DIR_DETECTION is used with STACK_DIR_UPPER.
+
+  On most systems the stack grows downward, from higher addresses toward lower
+  ones. In that case STACK_DIR_UPPER(a, b) returns (b); on the rare systems
+  where the stack grows upward, it returns (a).
+*/
+
+#if STACK_GROW_DIRECTION
+#define STACK_GROW_DIR_DETECTION
+#define STACK_DIR_UPPER(a,b) STACK_UPPER(0, (a), (b))
+#else
+#define STACK_GROW_DIR_DETECTION VALUE stack_grow_dir_detection
+#define STACK_DIR_UPPER(a,b) STACK_UPPER(&stack_grow_dir_detection, (a), (b))
+#endif
+#define IS_STACK_DIR_UPPER() STACK_DIR_UPPER(1,0)
+
+const char *rb_obj_info(VALUE obj);
+const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
+
+
+struct rb_thread_struct;
+size_t rb_size_pool_slot_size(unsigned char pool_id);
+
struct rb_execution_context_struct; /* in vm_core.h */
struct rb_objspace; /* in vm_core.h */
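
The mark/free helpers moved in the hunk above (RUBY_MARK_ENTER/LEAVE, RUBY_MARK_UNLESS_NULL,
RUBY_FREE_UNLESS_NULL) are normally wrapped around a typed object's mark and free callbacks.
A minimal sketch of that usage, assuming a core file that includes internal/gc.h; the struct
and function names are hypothetical, only the macros come from this header.

    #include "internal/gc.h"

    struct hypothetical_obj {      /* illustrative type, not from the patch */
        VALUE owner;               /* may be Qnil/Qfalse, hence the UNLESS_NULL guards */
        VALUE cache;
        char *buf;                 /* xmalloc'ed buffer, released with ruby_xfree */
    };

    /* These would be wired into an rb_data_type_t as dmark/dfree. */
    static void
    hypothetical_mark(void *ptr)   /* parameter must be named ptr: RUBY_MARK_ENTER expands to it */
    {
        struct hypothetical_obj *obj = ptr;

        RUBY_MARK_ENTER("hypothetical_obj");   /* no-ops unless RUBY_MARK_FREE_DEBUG */
        RUBY_MARK_UNLESS_NULL(obj->owner);     /* pins owner in place */
        /* cache may be moved by compaction; a matching update callback
         * (rb_gc_location) would be needed and is omitted here */
        RUBY_MARK_MOVABLE_UNLESS_NULL(obj->cache);
        RUBY_MARK_LEAVE("hypothetical_obj");
    }

    static void
    hypothetical_free(void *ptr)
    {
        struct hypothetical_obj *obj = ptr;

        RUBY_FREE_ENTER("hypothetical_obj");
        RUBY_FREE_UNLESS_NULL(obj->buf);       /* frees and NULLs the pointer */
        RUBY_FREE_LEAVE("hypothetical_obj");
        ruby_xfree(obj);
    }
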
@@ -137,6 +255,27 @@ void rb_gc_mark_and_move(VALUE *ptr);
} while (0)
RUBY_SYMBOL_EXPORT_BEGIN
+/* exports for objspace module */
+size_t rb_objspace_data_type_memsize(VALUE obj);
+void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data);
+void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *data);
+int rb_objspace_markable_object_p(VALUE obj);
+int rb_objspace_internal_object_p(VALUE obj);
+int rb_objspace_marked_object_p(VALUE obj);
+
+void rb_objspace_each_objects(
+    int (*callback)(void *start, void *end, size_t stride, void *data),
+    void *data);
+
+void rb_objspace_each_objects_without_setup(
+    int (*callback)(void *, void *, size_t, void *),
+    void *data);
+
+size_t rb_gc_obj_slot_size(VALUE obj);
+
+VALUE rb_gc_disable_no_rest(void);
+
+
/* gc.c (export) */
const char *rb_objspace_data_type_name(VALUE obj);
VALUE rb_wb_protected_newobj_of(VALUE, VALUE, size_t);
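
The exported rb_objspace_each_objects walks the heap page by page, handing the callback a
[start, end) slot range and the slot stride for that page. A sketch of a typical caller,
assuming it can see internal/gc.h; the counting callback and struct below are illustrative
and only the rb_objspace_* declarations come from this hunk.

    #include "internal/gc.h"

    struct live_count { size_t objects; };   /* hypothetical accumulator */

    /* Invoked once per heap page: slots lie in [vstart, vend) and are
     * `stride` bytes apart.  Returning non-zero stops the walk. */
    static int
    count_live_i(void *vstart, void *vend, size_t stride, void *data)
    {
        struct live_count *lc = data;
        VALUE v = (VALUE)vstart;

        for (; v != (VALUE)vend; v += stride) {
            if (RBASIC(v)->flags && !rb_objspace_internal_object_p(v)) {
                lc->objects++;   /* a live, user-visible slot */
            }
        }
        return 0;                /* keep walking the remaining pages */
    }

    static size_t
    count_live_objects(void)
    {
        struct live_count lc = { 0 };

        rb_objspace_each_objects(count_live_i, &lc);
        return lc.objects;
    }
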