#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#include "ruby/internal/config.h"
#include "ruby_assert.h"
#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#define VM_ASSERT(expr) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
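/* Usage sketch (illustrative, assuming a local `ec` and a function name
 * `example_func`): with VM_CHECK_MODE > 0 the first set of definitions is
 * active, so VM_ASSERT() expands to a real assertion and VM_UNREACHABLE()
 * calls rb_bug(); otherwise both compile away to nothing:
 *
 *   VM_ASSERT(ec->cfp != NULL);     - checked only when VM_CHECK_MODE > 0
 *   VM_UNREACHABLE(example_func);   - rb_bug() in debug, UNREACHABLE hint otherwise
 */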
#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
#if defined(RUBY_ASSERT_CRITICAL_SECTION)
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#include "ccan/list/list.h"
#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/serial.h"
#include "internal/vm.h"
#include "ruby_atomic.h"
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)
# define NSIG (_SIGMAX + 1)
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#define RUBY_NSIG NSIG
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
# define RUBY_SIGCHLD (0)
#if defined(__APPLE__)
# define SIGCHLD_LOSSY (1)
# define SIGCHLD_LOSSY (0)
#define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var) var
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
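/* Usage sketch (illustrative; `altstack` is a hypothetical local): when
 * USE_SIGALTSTACK is defined the macros register and free a real alternate
 * signal stack, otherwise they are no-ops so callers stay unconditional:
 *
 *   void *altstack;
 *   RB_ALTSTACK_INIT(altstack, rb_allocate_sigaltstack());
 *   ...
 *   RB_ALTSTACK_FREE(altstack);
 */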
#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2
#if defined(__GNUC__) && __GNUC__ >= 2
#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#undef OPT_STACK_CACHING
void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef signed long rb_snum_t;
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK = 0x2,
    RUBY_TAG_RETRY = 0x4,
    RUBY_TAG_RAISE = 0x6,
    RUBY_TAG_THROW = 0x7,
    RUBY_TAG_FATAL = 0x8,
#define TAG_NONE RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK RUBY_TAG_BREAK
#define TAG_NEXT RUBY_TAG_NEXT
#define TAG_RETRY RUBY_TAG_RETRY
#define TAG_REDO RUBY_TAG_REDO
#define TAG_RAISE RUBY_TAG_RAISE
#define TAG_THROW RUBY_TAG_THROW
#define TAG_FATAL RUBY_TAG_FATAL
#define TAG_MASK RUBY_TAG_MASK
enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
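/* Decoding sketch (illustrative; `state` is a hypothetical throw operand):
 * the throw state carries one of the TAG_* values in its low bits plus the
 * no-escape bit, so it can be unpacked as
 *
 *   int tag       = state & VM_THROW_STATE_MASK;
 *   int no_escape = (state & VM_THROW_NO_ESCAPE_FLAG) != 0;
 */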
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              sizeof(const rb_cref_t *) <= RVALUE_SIZE);
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
#define PATHOBJ_PATH 0
#define PATHOBJ_REALPATH 1
pathobj_path(VALUE pathobj)
pathobj_realpath(VALUE pathobj)
typedef uintptr_t iseq_bits_t;
#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
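/* Illustrative note: is_entries is a single flat buffer holding every kind of
 * inline-storage entry, so ISEQ_IS_IC_ENTRY() locates the idx-th constant
 * cache by first skipping the ise_size + icvarc_size + ivc_size entries that
 * sit in front of the IC group; ISEQ_IS_SIZE() is simply the sum of all four
 * group sizes. */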
    enum rb_iseq_type type;
    unsigned int iseq_size;
    unsigned int has_lead : 1;
    unsigned int has_opt : 1;
    unsigned int has_rest : 1;
    unsigned int has_post : 1;
    unsigned int has_kw : 1;
    unsigned int has_kwrest : 1;
    unsigned int has_block : 1;
    unsigned int ambiguous_param0 : 1;
    unsigned int accepts_no_kwarg : 1;
    unsigned int ruby2_keywords: 1;
    const VALUE *opt_table;
    const struct rb_iseq_param_keyword {
        VALUE *default_values;
    unsigned int *positions;
#if VM_INSN_INFO_TABLE_IMPL == 2
    struct succ_index_table *succ_index_table;
    const ID *local_table;
    rb_snum_t flip_count;
    VALUE pc2branchindex;
    VALUE *original_iseq;
    unsigned int local_table_size;
    unsigned int ic_size;
    unsigned int ise_size;
    unsigned int ivc_size;
    unsigned int icvarc_size;
    unsigned int ci_size;
    unsigned int stack_max;
    bool builtin_inline_p;
#if USE_MJIT || USE_YJIT
    long unsigned total_calls;
#define ISEQ_BODY(iseq) ((iseq)->body)
#define USE_LAZY_LOAD 0
    if (ISEQ_BODY(iseq) == NULL) {
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
    return rb_iseq_check(def->body.iseq.iseqptr);
enum ruby_special_exceptions {
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, (ptr))
    rb_vm_at_exit_func *func;
void rb_objspace_call_finalizer(struct rb_objspace *);
    unsigned int running;
    struct ccan_list_head set;
    unsigned int blocking_cnt;
    rb_nativethread_lock_t lock;
    unsigned int lock_rec;
    bool barrier_waiting;
    unsigned int barrier_cnt;
    rb_nativethread_cond_t barrier_cond;
    rb_nativethread_cond_t terminate_cond;
    bool terminate_waiting;
#ifdef USE_SIGALTSTACK
    rb_serial_t fork_gen;
    rb_nativethread_lock_t waitpid_lock;
    struct ccan_list_head waiting_pids;
    struct ccan_list_head waiting_grps;
    struct ccan_list_head waiting_fds;
    volatile int ubf_async_safe;
    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;
    VALUE mark_object_ary;
    const VALUE special_exceptions[ruby_special_error_count];
    shape_id_t next_shape_id;
    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    VALUE loaded_features_realpaths;
    VALUE loaded_features_realpath_map;
    struct st_table *loaded_features_index;
    VALUE cmd[RUBY_NSIG];
    struct st_table *ensure_rollback_table;
    int src_encoding_index;
    struct ccan_list_head workqueue;
    rb_nativethread_lock_t workqueue_lock;
    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int builtin_inline_index;
#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE];
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    size_t thread_vm_stack_size;
    size_t thread_machine_stack_size;
    size_t fiber_vm_stack_size;
    size_t fiber_machine_stack_size;
#define RUBY_VM_SIZE_ALIGN 4096
#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE))
#if defined(__powerpc64__) || defined(__ppc64__)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE))
#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
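/* Worked numbers (illustrative): the sizes above are counted in VALUEs, so on
 * a typical 64-bit build (sizeof(VALUE) == 8) the defaults come out as
 *   RUBY_VM_THREAD_VM_STACK_SIZE   128 * 1024 * 8 = 1 MiB
 *   RUBY_VM_FIBER_VM_STACK_SIZE     16 * 1024 * 8 = 128 KiB
 * and the sanitizer overrides enlarge the machine stacks (e.g.
 * 1024 * 1024 * 8 = 8 MiB per thread) to leave room for redzones. */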
#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
    enum rb_block_type type;
    const void *block_code;
rb_thread_ptr(VALUE thval)
enum rb_thread_status {
    THREAD_STOPPED_FOREVER,
typedef RUBY_JMP_BUF rb_jmpbuf_t;
typedef void *rb_jmpbuf_t[5];
    enum ruby_tag_type state;
    unsigned int lock_rec;
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
              sizeof(struct rb_vm_tag));
    size_t vm_stack_size;
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;
    const VALUE *root_lep;
    VALUE passed_block_handler;
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);
    VALUE private_const_reference;
    size_t stack_maxsize;
#ifndef rb_execution_context_t
#define rb_execution_context_t rb_execution_context_t
#define VM_CORE_H_EC_DEFINED 1
    struct ccan_list_node lt_node;
    BITFIELD(enum rb_thread_status, status, 2);
    unsigned int locking_native_thread : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    uint32_t running_time_us;
    void *blocking_region_buffer;
#if OPT_CALL_THREADED_CODE
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;
    rb_nativethread_lock_t interrupt_lock;
    VALUE locking_mutex;
    VALUE (*func)(void *);
    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    VALUE stat_insn_usage;
    unsigned int blocking;
static inline unsigned int
    return (unsigned int)th->serial;
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;
#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
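/* Decoding sketch (illustrative; `flags` is a hypothetical operand value):
 * the low 3 bits select the kind of definition and the remaining bits are
 * independent flags, so an operand can be unpacked as
 *
 *   rb_vm_defineclass_type_t kind = VM_DEFINECLASS_TYPE(flags);
 *   int scoped    = VM_DEFINECLASS_SCOPED_P(flags) != 0;
 *   int has_super = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) != 0;
 */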
RUBY_SYMBOL_EXPORT_BEGIN
rb_iseq_new_with_callback_new_callback(
attr_index_t rb_estimate_iv_count(VALUE klass, const rb_iseq_t * initialize_iseq);
RUBY_SYMBOL_EXPORT_END
#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))
    unsigned int is_from_method: 1;
    unsigned int is_lambda: 1;
    unsigned int is_isolated: 1;
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_ractor_make_shareable(VALUE self);
RUBY_SYMBOL_EXPORT_END
    unsigned int env_size;
#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))
    const VALUE pathobj;
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
    VM_SVAR_LASTLINE = 0,
    VM_SVAR_BACKREF = 1,
    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2
typedef VALUE CDHASH;
#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
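/* Tagging sketch (illustrative; `ep` is a hypothetical environment pointer):
 * pointers stored as VALUEs are at least 4-byte aligned, so the two low bits
 * are free to carry a tag.  GC_GUARDED_PTR() sets bit 0 to mark a guarded ep
 * and GC_GUARDED_PTR_REF() masks the low bits off again:
 *
 *   VALUE guarded = GC_GUARDED_PTR(ep);                 - ep | 0x01
 *   const VALUE *plain = GC_GUARDED_PTR_REF(guarded);   - low bits cleared
 */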
enum vm_frame_env_flags {
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK = 0x22220001,
    VM_FRAME_MAGIC_CLASS = 0x33330001,
    VM_FRAME_MAGIC_TOP = 0x44440001,
    VM_FRAME_MAGIC_CFUNC = 0x55550001,
    VM_FRAME_MAGIC_IFUNC = 0x66660001,
    VM_FRAME_MAGIC_EVAL = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY = 0x79990001,
    VM_FRAME_MAGIC_MASK = 0x7fff0001,
    VM_FRAME_FLAG_FINISH = 0x0020,
    VM_FRAME_FLAG_BMETHOD = 0x0040,
    VM_FRAME_FLAG_CFRAME = 0x0080,
    VM_FRAME_FLAG_LAMBDA = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED = 0x0800,
    VM_ENV_FLAG_LOCAL = 0x0002,
    VM_ENV_FLAG_ESCAPED = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED = 0x0010,
#define VM_ENV_DATA_SIZE ( 3)
#define VM_ENV_DATA_INDEX_ME_CREF (-2)
#define VM_ENV_DATA_INDEX_SPECVAL (-1)
#define VM_ENV_DATA_INDEX_FLAGS ( 0)
#define VM_ENV_DATA_INDEX_ENV ( 1)
#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
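/* Layout sketch (illustrative): the indices above are relative to the ep
 * (environment pointer), so the VM_ENV_DATA_SIZE (3) slots sit at
 *
 *   ep[-2]  me/cref        (VM_ENV_DATA_INDEX_ME_CREF)
 *   ep[-1]  specval        (VM_ENV_DATA_INDEX_SPECVAL: prev ep or block handler)
 *   ep[ 0]  flags          (VM_ENV_DATA_INDEX_FLAGS)
 *
 * while VM_ENV_DATA_INDEX_ENV (ep[1]) holds the escaped env object and
 * VM_ENV_INDEX_LAST_LVAR addresses the last local relative to the same ep. */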
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    return flags & flag;
static inline unsigned long
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
rb_obj_is_iseq(VALUE iseq)
    return imemo_type_p(iseq, imemo_iseq);
#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return !VM_FRAME_CFRAME_P(cfp);
#define RUBYVM_CFUNC_FRAME_P(cfp) \
    (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
VM_ENV_LOCAL_P(const VALUE *ep)
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
VM_ENV_ESCAPED_P(const VALUE *ep)
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
#if VM_CHECK_MODE > 0
vm_assert_env(VALUE obj)
    VM_ASSERT(imemo_type_p(obj, imemo_env));
VM_ENV_ENVVAL(const VALUE *ep)
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(vm_assert_env(envval));
VM_ENV_ENVVAL_PTR(const VALUE *ep)
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    *((VALUE *)ptr) = v;
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
    VM_FORCE_WRITE(ptr, special_const_value);
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
    ((void *)(ecfp) > (void *)(cfp))
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
VM_BH_IFUNC_P(VALUE block_handler)
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
        return block_handler_type_proc;
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
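/* Encoding sketch (derived from the predicates above): a block handler is
 * either VM_BLOCK_HANDLER_NONE (0), a captured block tagged in its low two
 * bits (0x01 = iseq block, 0x03 = ifunc block), a Symbol, or a Proc object;
 * vm_block_handler_type() dispatches in exactly that order. */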
    return ((VALUE) cfp->block_code) == block_handler;
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
      case block_type_proc:
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
static inline const struct rb_block *
vm_proc_block(VALUE procval)
static inline const VALUE *vm_block_ep(const struct rb_block *block);
vm_proc_iseq(VALUE procval)
    return vm_block_iseq(vm_proc_block(procval));
static inline const VALUE *
vm_proc_ep(VALUE procval)
    return vm_block_ep(vm_proc_block(procval));
vm_block_iseq(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    VM_UNREACHABLE(vm_block_iseq);
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc: return block->as.captured.ep;
      case block_type_proc: return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    VM_UNREACHABLE(vm_block_ep);
vm_block_self(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
    VM_UNREACHABLE(vm_block_self);
VM_BH_TO_SYMBOL(VALUE block_handler)
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
VM_BH_FROM_SYMBOL(VALUE symbol)
VM_BH_TO_PROC(VALUE block_handler)
    return block_handler;
VM_BH_FROM_PROC(VALUE procval)
1697#
if OPT_STACK_CACHING
1702#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
1703#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
1704void rb_vm_bugreport(
const void *);
1705typedef void (*ruby_sighandler_t)(int);
1707NORETURN(
void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler,
int sig, const
void *, const
char *fmt, ...));
1710RUBY_SYMBOL_EXPORT_BEGIN
1715RUBY_SYMBOL_EXPORT_END
1729 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1735 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1742void rb_vm_inc_const_missing_count(
void);
1748void rb_thread_start_timer_thread(
void);
1749void rb_thread_stop_timer_thread(
void);
1750void rb_thread_reset_timer_thread(
void);
1751void rb_thread_wakeup_timer_thread(
int);
1754rb_vm_living_threads_init(
rb_vm_t *vm)
1756 ccan_list_head_init(&vm->waiting_fds);
1757 ccan_list_head_init(&vm->waiting_pids);
1758 ccan_list_head_init(&vm->workqueue);
1759 ccan_list_head_init(&vm->waiting_grps);
1760 ccan_list_head_init(&vm->ractor.set);
1763typedef int rb_backtrace_iter_func(
void *,
VALUE,
int,
VALUE);
1775void rb_vm_register_special_exception_str(
enum ruby_special_exceptions sp,
VALUE exception_class,
VALUE mesg);
1777#define rb_vm_register_special_exception(sp, e, m) \
1778 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1786#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1788#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1789 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1790 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1791 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1792 if (UNLIKELY((cfp) <= &bound[1])) { \
1793 vm_stackoverflow(); \
1797#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1798 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
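/* How the check works (illustrative): values grow upward from one end of the
 * VM stack while control frames are allocated downward from the other, so
 * overflow is detected by asking whether the current cfp has come within
 * `margin` slots of sp; if so, vm_stackoverflow() raises the preallocated
 * sysstack_error defined above. */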
#if RUBY_VM_THREAD_MODEL == 2
MJIT_SYMBOL_EXPORT_BEGIN
MJIT_SYMBOL_EXPORT_END
#define GET_VM() rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC() rb_current_execution_context(true)
    return ec->thread_ptr;
    VM_ASSERT(th->ractor != NULL);
rb_current_execution_context(bool expect_ec)
#ifdef RB_THREAD_LOCAL_SPECIFIER
    VM_ASSERT(!expect_ec || ec != NULL);
rb_current_thread(void)
    return rb_ec_thread_ptr(ec);
rb_current_ractor(void)
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    return rb_ec_ractor_ptr(ec);
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
    return ruby_current_vm_ptr;
                                        unsigned int recorded_lock_rec,
                                        unsigned int current_lock_rec);
static inline unsigned int
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
    return vm->ractor.sync.lock_rec;
#error "unsupported thread model"
    TIMER_INTERRUPT_MASK = 0x01,
    PENDING_INTERRUPT_MASK = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK = 0x08,
    TERMINATE_INTERRUPT_MASK = 0x10,
    VM_BARRIER_INTERRUPT_MASK = 0x20,
#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
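/* Sketch of the mechanism (illustrative): each mask above is one bit in
 * ec->interrupt_flag; the RUBY_VM_SET_* macros OR a bit in atomically, bits
 * also present in ec->interrupt_mask are temporarily ignored, and
 * RUBY_VM_INTERRUPTED() reports only the pending/trap bits that remain
 * unmasked. */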
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    return ec->interrupt_flag & ~(ec)->interrupt_mask;
int rb_signal_buff_size(void);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
    VM_ASSERT(ec == GET_EC());
    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    VM_ASSERT((hooks->events & flag) != 0);
    trace_arg.event = flag;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.klass_solved = 0;
    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
    return &cr_pub->hooks;
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
                        rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
void rb_vm_trap_exit(rb_vm_t *vm);
RUBY_SYMBOL_EXPORT_BEGIN
int rb_thread_check_trap_pending(void);
#define RUBY_EVENT_COVERAGE_LINE 0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);
void rb_postponed_job_flush(rb_vm_t *vm);
RUBY_SYMBOL_EXPORT_END