Ruby 3.2.4p170 (2024-04-23 revision af471c0e0127eea0cafa6f308c0425bbfab0acf5)
vm_core.h
1#ifndef RUBY_VM_CORE_H
2#define RUBY_VM_CORE_H
3/**********************************************************************
4
5 vm_core.h -
6
7 $Author$
8 created at: 04/01/01 19:41:38 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
14/*
15 * Enable check mode.
16 * 1: enable local assertions.
17 */
18#ifndef VM_CHECK_MODE
19
20// respect RUBY_DEBUG: if the given n is 0, use RUBY_DEBUG instead
21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24#endif
25
39#ifndef VMDEBUG
40#define VMDEBUG 0
41#endif
42
43#if 0
44#undef VMDEBUG
45#define VMDEBUG 3
46#endif
47
48#include "ruby/internal/config.h"
49
50#include <stddef.h>
51#include <signal.h>
52#include <stdarg.h>
53
54#include "ruby_assert.h"
55
56#if VM_CHECK_MODE > 0
57#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
58#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
59#define RUBY_ASSERT_CRITICAL_SECTION
60#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
61#else
62#define VM_ASSERT(expr) ((void)0)
63#define VM_UNREACHABLE(func) UNREACHABLE
64#define RUBY_DEBUG_THREAD_SCHEDULE()
65#endif
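
/* Usage sketch (illustrative only, not part of the original header): in
 * check-mode builds VM_ASSERT() expands to a real assertion and
 * VM_UNREACHABLE() reports a bug via rb_bug(); in normal builds both compile
 * away.
 *
 *   static int
 *   example_sign(int n)                    // hypothetical helper
 *   {
 *       VM_ASSERT(n != 0);                 // checked only when VM_CHECK_MODE > 0
 *       if (n > 0) return 1;
 *       if (n < 0) return -1;
 *       VM_UNREACHABLE(example_sign);      // rb_bug() in check-mode builds
 *       return 0;
 *   }
 */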
66
67#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
68
69#if defined(RUBY_ASSERT_CRITICAL_SECTION)
70// TODO add documentation
71extern int ruby_assert_critical_section_entered;
72#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
73#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
74#else
75#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
76#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
77#endif
78
79#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
80# include "wasm/setjmp.h"
81#else
82# include <setjmp.h>
83#endif
84
85#if defined(__linux__) || defined(__FreeBSD__)
86# define RB_THREAD_T_HAS_NATIVE_ID
87#endif
88
90#include "ccan/list/list.h"
91#include "id.h"
92#include "internal.h"
93#include "internal/array.h"
94#include "internal/basic_operators.h"
95#include "internal/serial.h"
96#include "internal/vm.h"
97#include "method.h"
98#include "node.h"
99#include "ruby/ruby.h"
100#include "ruby/st.h"
101#include "ruby_atomic.h"
102#include "vm_opts.h"
103#include "shape.h"
104
105#include "ruby/thread_native.h"
106
107/*
108 * implementation selector of get_insn_info algorithm
109 * 0: linear search
110 * 1: binary search
111 * 2: succinct bitvector
112 */
113#ifndef VM_INSN_INFO_TABLE_IMPL
114# define VM_INSN_INFO_TABLE_IMPL 2
115#endif
116
117#if defined(NSIG_MAX) /* POSIX issue 8 */
118# undef NSIG
119# define NSIG NSIG_MAX
120#elif defined(_SIG_MAXSIG) /* FreeBSD */
121# undef NSIG
122# define NSIG _SIG_MAXSIG
123#elif defined(_SIGMAX) /* QNX */
124# define NSIG (_SIGMAX + 1)
125#elif defined(NSIG) /* 99% of everything else */
126# /* take it */
127#else /* Last resort */
128# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
129#endif
130
131#define RUBY_NSIG NSIG
132
133#if defined(SIGCLD)
134# define RUBY_SIGCHLD (SIGCLD)
135#elif defined(SIGCHLD)
136# define RUBY_SIGCHLD (SIGCHLD)
137#else
138# define RUBY_SIGCHLD (0)
139#endif
140
141/* platforms with broken or non-existent SIGCHLD work by polling */
142#if defined(__APPLE__)
143# define SIGCHLD_LOSSY (1)
144#else
145# define SIGCHLD_LOSSY (0)
146#endif
147
148/* define to 0 to test old code path */
149#define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
150
151#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
152# define USE_SIGALTSTACK
153void *rb_allocate_sigaltstack(void);
154void *rb_register_sigaltstack(void *);
155# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
156# define RB_ALTSTACK_FREE(var) free(var)
157# define RB_ALTSTACK(var) var
158#else /* noop */
159# define RB_ALTSTACK_INIT(var, altstack)
160# define RB_ALTSTACK_FREE(var)
161# define RB_ALTSTACK(var) (0)
162#endif
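
/* Usage sketch (hypothetical call site; the real registration happens in the
 * thread/signal code): a native thread allocates an alternate signal stack and
 * registers it so a SEGV caused by machine-stack overflow can still be handled.
 *
 *   void *cookie;
 *   RB_ALTSTACK_INIT(cookie, rb_allocate_sigaltstack()); // register the alt stack
 *   ...                                                  // run Ruby code
 *   RB_ALTSTACK_FREE(cookie);                            // release at thread exit
 *
 * On platforms without the prerequisites these macros expand to no-ops.
 */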
163
164#include THREAD_IMPL_H
165#define RUBY_VM_THREAD_MODEL 2
166
167/*****************/
168/* configuration */
169/*****************/
170
171/* gcc ver. check */
172#if defined(__GNUC__) && __GNUC__ >= 2
173
174#if OPT_TOKEN_THREADED_CODE
175#if OPT_DIRECT_THREADED_CODE
176#undef OPT_DIRECT_THREADED_CODE
177#endif
178#endif
179
180#else /* defined(__GNUC__) && __GNUC__ >= 2 */
181
182/* disable threaded code options */
183#if OPT_DIRECT_THREADED_CODE
184#undef OPT_DIRECT_THREADED_CODE
185#endif
186#if OPT_TOKEN_THREADED_CODE
187#undef OPT_TOKEN_THREADED_CODE
188#endif
189#endif
190
191/* call threaded code */
192#if OPT_CALL_THREADED_CODE
193#if OPT_DIRECT_THREADED_CODE
194#undef OPT_DIRECT_THREADED_CODE
195#endif /* OPT_DIRECT_THREADED_CODE */
196#if OPT_STACK_CACHING
197#undef OPT_STACK_CACHING
198#endif /* OPT_STACK_CACHING */
199#endif /* OPT_CALL_THREADED_CODE */
200
201void rb_vm_encoded_insn_data_table_init(void);
202typedef unsigned long rb_num_t;
203typedef signed long rb_snum_t;
204
205enum ruby_tag_type {
206 RUBY_TAG_NONE = 0x0,
207 RUBY_TAG_RETURN = 0x1,
208 RUBY_TAG_BREAK = 0x2,
209 RUBY_TAG_NEXT = 0x3,
210 RUBY_TAG_RETRY = 0x4,
211 RUBY_TAG_REDO = 0x5,
212 RUBY_TAG_RAISE = 0x6,
213 RUBY_TAG_THROW = 0x7,
214 RUBY_TAG_FATAL = 0x8,
215 RUBY_TAG_MASK = 0xf
216};
217
218#define TAG_NONE RUBY_TAG_NONE
219#define TAG_RETURN RUBY_TAG_RETURN
220#define TAG_BREAK RUBY_TAG_BREAK
221#define TAG_NEXT RUBY_TAG_NEXT
222#define TAG_RETRY RUBY_TAG_RETRY
223#define TAG_REDO RUBY_TAG_REDO
224#define TAG_RAISE RUBY_TAG_RAISE
225#define TAG_THROW RUBY_TAG_THROW
226#define TAG_FATAL RUBY_TAG_FATAL
227#define TAG_MASK RUBY_TAG_MASK
228
229enum ruby_vm_throw_flags {
230 VM_THROW_NO_ESCAPE_FLAG = 0x8000,
231 VM_THROW_STATE_MASK = 0xff
232};
233
234/* forward declarations */
235struct rb_thread_struct;
237
238/* iseq data type */
240
242 rb_serial_t raw;
243 VALUE data[2];
244};
245
246// imemo_constcache
248 VALUE flags;
249
250 VALUE value; // v0
251 VALUE _unused1; // v1
252 VALUE _unused2; // v2
253 const rb_cref_t *ic_cref; // v3
254};
255STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
256 (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
257 sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
258
275
277 uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
278 ID iv_set_name;
279};
280
284
286 struct {
287 struct rb_thread_struct *running_thread;
288 VALUE value;
289 } once;
290 struct iseq_inline_constant_cache ic_cache;
291 struct iseq_inline_iv_cache_entry iv_cache;
292};
293
295 const struct rb_callinfo *ci;
296 const struct rb_callcache *cc;
297 VALUE block_handler;
298 VALUE recv;
299 int argc;
300 int kw_splat;
301};
302
304
305#if 1
306#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
307#else
308#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
309#endif
310#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
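
/* Usage sketch: the Get*Ptr() wrappers later in this header (GetVMPtr,
 * GetProcPtr, GetBindingPtr) all expand to this macro, e.g.
 *
 *   rb_proc_t *proc;
 *   GetCoreDataFromValue(procval, rb_proc_t, proc);  // effectively proc = DATA_PTR(procval)
 */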
311
313 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
314 VALUE base_label; /* String */
315 VALUE label; /* String */
316 int first_lineno;
317 int node_id;
318 rb_code_location_t code_location;
320
321#define PATHOBJ_PATH 0
322#define PATHOBJ_REALPATH 1
323
324static inline VALUE
325pathobj_path(VALUE pathobj)
326{
327 if (RB_TYPE_P(pathobj, T_STRING)) {
328 return pathobj;
329 }
330 else {
331 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
332 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
333 }
334}
335
336static inline VALUE
337pathobj_realpath(VALUE pathobj)
338{
339 if (RB_TYPE_P(pathobj, T_STRING)) {
340 return pathobj;
341 }
342 else {
343 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
344 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
345 }
346}
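
/* Usage sketch: both accessors accept either pathobj layout, so callers never
 * need to know whether a realpath was recorded.
 *
 *   VALUE path     = pathobj_path(loc.pathobj);      // always the path
 *   VALUE realpath = pathobj_realpath(loc.pathobj);  // == path when no Array is stored
 *
 * (loc is assumed to be an rb_iseq_location_t; see rb_iseq_pathobj_new() near
 * the end of this header for how the frozen String/Array is built.)
 */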
347
348/* Forward declarations */
349struct rb_mjit_unit;
350
351typedef uintptr_t iseq_bits_t;
352
353#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
354
355/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
356#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
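
/* Layout sketch: is_entries is a single flat array that stores every kind of
 * inline cache back to back in the order [ IVC | ICVARC | ISE | IC ], so the
 * IC region begins after the first (ivc_size + icvarc_size + ise_size) slots
 * and ISEQ_IS_IC_ENTRY(body, idx) picks the idx-th IC inside that tail. */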
357
358/* instruction sequence type */
359enum rb_iseq_type {
360 ISEQ_TYPE_TOP,
361 ISEQ_TYPE_METHOD,
362 ISEQ_TYPE_BLOCK,
363 ISEQ_TYPE_CLASS,
364 ISEQ_TYPE_RESCUE,
365 ISEQ_TYPE_ENSURE,
366 ISEQ_TYPE_EVAL,
367 ISEQ_TYPE_MAIN,
368 ISEQ_TYPE_PLAIN
369};
370
372 enum rb_iseq_type type;
373
374 unsigned int iseq_size;
375 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
376
400 struct {
401 struct {
402 unsigned int has_lead : 1;
403 unsigned int has_opt : 1;
404 unsigned int has_rest : 1;
405 unsigned int has_post : 1;
406 unsigned int has_kw : 1;
407 unsigned int has_kwrest : 1;
408 unsigned int has_block : 1;
409
410 unsigned int ambiguous_param0 : 1; /* {|a|} */
411 unsigned int accepts_no_kwarg : 1;
412 unsigned int ruby2_keywords: 1;
413 } flags;
414
415 unsigned int size;
416
417 int lead_num;
418 int opt_num;
419 int rest_start;
420 int post_start;
421 int post_num;
422 int block_start;
423
424 const VALUE *opt_table; /* (opt_num + 1) entries. */
425 /* opt_num and opt_table:
426 *
427 * def foo o1=e1, o2=e2, ..., oN=eN
428 * #=>
429 * # prologue code
430 * A1: e1
431 * A2: e2
432 * ...
433 * AN: eN
434 * AL: body
435 * opt_num = N
436 * opt_table = [A1, A2, ..., AN, AL]
437 */
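
 /* Sketch of how the table is consumed (an assumption drawn from the comment
  * above, not code in this header): when a call site supplies `given` of the
  * N optional arguments, execution starts at the instruction offset stored at
  * index `given`, e.g.
  *
  *   pc = iseq_encoded + opt_table[given];  // given == N starts right at AL (the body)
  */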
438
439 const struct rb_iseq_param_keyword {
440 int num;
441 int required_num;
442 int bits_start;
443 int rest_start;
444 const ID *table;
445 VALUE *default_values;
446 } *keyword;
448
449 rb_iseq_location_t location;
450
451 /* insn info, must be freed */
453 const struct iseq_insn_info_entry *body;
454 unsigned int *positions;
455 unsigned int size;
456#if VM_INSN_INFO_TABLE_IMPL == 2
457 struct succ_index_table *succ_index_table;
458#endif
459 } insns_info;
460
461 const ID *local_table; /* must free */
462
463 /* catch table */
464 struct iseq_catch_table *catch_table;
465
466 /* for child iseq */
467 const struct rb_iseq_struct *parent_iseq;
468 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
469
470 union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
471 struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
472
473 struct {
474 rb_snum_t flip_count;
475 VALUE script_lines;
476 VALUE coverage;
477 VALUE pc2branchindex;
478 VALUE *original_iseq;
479 } variable;
480
481 unsigned int local_table_size;
482 unsigned int ic_size; // Number of IC caches
483 unsigned int ise_size; // Number of ISE caches
484 unsigned int ivc_size; // Number of IVC caches
485 unsigned int icvarc_size; // Number of ICVARC caches
486 unsigned int ci_size;
487 unsigned int stack_max; /* for stack overflow check */
488
489 bool catch_except_p; // Set true if a frame of this ISeq may catch an exception.
490 // builtin_inline_p: if true, this ISeq is leaf *and* backtraces are not used, for
491 // example by rb_profile_frames. We only verify the leafness when VM_CHECK_MODE is set, though.
492 // Note that GC allocations might use backtraces due to
493 // ObjectSpace#trace_object_allocations.
494 // For more details, see: https://bugs.ruby-lang.org/issues/16956
495 bool builtin_inline_p;
496
497 union {
498 iseq_bits_t * list; /* Find references for GC */
499 iseq_bits_t single;
500 } mark_bits;
501
502 struct rb_id_table *outer_variables;
503
504 const rb_iseq_t *mandatory_only_iseq;
505
506#if USE_MJIT || USE_YJIT
507 // Function pointer for JIT code
508 VALUE (*jit_func)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
509 // Number of total calls with jit_exec()
510 long unsigned total_calls;
511#endif
512
513#if USE_MJIT
514 // MJIT stores some data on each iseq.
515 struct rb_mjit_unit *mjit_unit;
516#endif
517
518#if USE_YJIT
519 // YJIT stores some data on each iseq.
520 void *yjit_payload;
521#endif
522};
523
524/* T_IMEMO/iseq */
525/* typedef rb_iseq_t is in method.h */
527 VALUE flags; /* 1 */
528 VALUE wrapper; /* 2 */
529
530 struct rb_iseq_constant_body *body; /* 3 */
531
532 union { /* 4, 5 words */
533 struct iseq_compile_data *compile_data; /* used at compile time */
534
535 struct {
536 VALUE obj;
537 int index;
538 } loader;
539
540 struct {
541 struct rb_hook_list_struct *local_hooks;
542 rb_event_flag_t global_trace_events;
543 } exec;
544 } aux;
545};
546
547#define ISEQ_BODY(iseq) ((iseq)->body)
548
549#ifndef USE_LAZY_LOAD
550#define USE_LAZY_LOAD 0
551#endif
552
553#if USE_LAZY_LOAD
554const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
555#endif
556
557static inline const rb_iseq_t *
558rb_iseq_check(const rb_iseq_t *iseq)
559{
560#if USE_LAZY_LOAD
561 if (ISEQ_BODY(iseq) == NULL) {
562 rb_iseq_complete((rb_iseq_t *)iseq);
563 }
564#endif
565 return iseq;
566}
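
/* Usage note (sketch): with USE_LAZY_LOAD an iseq registered for lazy loading
 * may still have a NULL body, so callers fetch iseqs through rb_iseq_check()
 * (as def_iseq_ptr() below does) to make sure the body is materialized first. */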
567
568static inline const rb_iseq_t *
569def_iseq_ptr(rb_method_definition_t *def)
570{
571// TODO: revisit. To check the bug, enable this assertion.
572#if VM_CHECK_MODE > 0
573 if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
574#endif
575 return rb_iseq_check(def->body.iseq.iseqptr);
576}
577
578enum ruby_special_exceptions {
579 ruby_error_reenter,
580 ruby_error_nomemory,
581 ruby_error_sysstack,
582 ruby_error_stackfatal,
583 ruby_error_stream_closed,
584 ruby_special_error_count
585};
586
587#define GetVMPtr(obj, ptr) \
588 GetCoreDataFromValue((obj), rb_vm_t, (ptr))
589
590struct rb_vm_struct;
591typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
592
593typedef struct rb_at_exit_list {
594 rb_vm_at_exit_func *func;
595 struct rb_at_exit_list *next;
597
598struct rb_objspace;
599struct rb_objspace *rb_objspace_alloc(void);
600void rb_objspace_free(struct rb_objspace *);
601void rb_objspace_call_finalizer(struct rb_objspace *);
602
603typedef struct rb_hook_list_struct {
604 struct rb_event_hook_struct *hooks;
605 rb_event_flag_t events;
606 unsigned int running;
607 bool need_clean;
608 bool is_local;
610
611
612// see builtin.h for definition
613typedef const struct rb_builtin_function *RB_BUILTIN;
614
615typedef struct rb_vm_struct {
616 VALUE self;
617
618 struct {
619 struct ccan_list_head set;
620 unsigned int cnt;
621 unsigned int blocking_cnt;
622
623 struct rb_ractor_struct *main_ractor;
624 struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
625
626 struct {
627 // monitor
628 rb_nativethread_lock_t lock;
629 struct rb_ractor_struct *lock_owner;
630 unsigned int lock_rec;
631
632 // barrier
633 bool barrier_waiting;
634 unsigned int barrier_cnt;
635 rb_nativethread_cond_t barrier_cond;
636
637 // join at exit
638 rb_nativethread_cond_t terminate_cond;
639 bool terminate_waiting;
640 } sync;
641 } ractor;
642
643#ifdef USE_SIGALTSTACK
644 void *main_altstack;
645#endif
646
647 rb_serial_t fork_gen;
648 rb_nativethread_lock_t waitpid_lock;
649 struct ccan_list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
650 struct ccan_list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
651 struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
652
653 /* set in single-threaded processes only: */
654 volatile int ubf_async_safe;
655
656 unsigned int running: 1;
657 unsigned int thread_abort_on_exception: 1;
658 unsigned int thread_report_on_exception: 1;
659 unsigned int thread_ignore_deadlock: 1;
660
661 /* object management */
662 VALUE mark_object_ary;
663 const VALUE special_exceptions[ruby_special_error_count];
664
665 /* object shapes */
666 rb_shape_t *shape_list;
667 rb_shape_t *root_shape;
668 shape_id_t next_shape_id;
669
670 /* load */
671 VALUE top_self;
672 VALUE load_path;
673 VALUE load_path_snapshot;
674 VALUE load_path_check_cache;
675 VALUE expanded_load_path;
676 VALUE loaded_features;
677 VALUE loaded_features_snapshot;
678 VALUE loaded_features_realpaths;
679 VALUE loaded_features_realpath_map;
680 struct st_table *loaded_features_index;
681 struct st_table *loading_table;
682 // For running the init function of statically linked
683 // extensions when they are loaded
684 struct st_table *static_ext_inits;
685
686 /* signal */
687 struct {
688 VALUE cmd[RUBY_NSIG];
689 } trap_list;
690
691 /* relation table of ensure - rollback for callcc */
692 struct st_table *ensure_rollback_table;
693
694 /* postponed_job (async-signal-safe, NOT thread-safe) */
695 struct rb_postponed_job_struct *postponed_job_buffer;
696 rb_atomic_t postponed_job_index;
697
698 int src_encoding_index;
699
700 /* workqueue (thread-safe, NOT async-signal-safe) */
701 struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
702 rb_nativethread_lock_t workqueue_lock;
703
704 VALUE orig_progname, progname;
705 VALUE coverages, me2counter;
706 int coverage_mode;
707
708 st_table * defined_module_hash;
709
710 struct rb_objspace *objspace;
711
712 rb_at_exit_list *at_exit;
713
714 st_table *frozen_strings;
715
716 const struct rb_builtin_function *builtin_function_table;
717 int builtin_inline_index;
718
719 struct rb_id_table *negative_cme_table;
720 st_table *overloaded_cme_table; // cme -> overloaded_cme
721
722 // This id table contains a mapping from ID to ICs. It does this with ID
723 // keys and nested st_tables as values. The nested tables have ICs as keys
724 // and Qtrue as values. It is used when inline constant caches need to be
725 // invalidated or ISEQs are being freed.
726 struct rb_id_table *constant_cache;
727
728#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
729#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
730#endif
731 const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
732
733#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
734 uint32_t clock;
735#endif
736
737 /* params */
738 struct { /* size in byte */
739 size_t thread_vm_stack_size;
740 size_t thread_machine_stack_size;
741 size_t fiber_vm_stack_size;
742 size_t fiber_machine_stack_size;
743 } default_params;
744
745} rb_vm_t;
746
747/* default values */
748
749#define RUBY_VM_SIZE_ALIGN 4096
750
751#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
752#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
753#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
754#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
755
756#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
757#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
758#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
759#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
760#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
761#else
762#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
763#endif
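
/* Worked example for the size comments above: all of these are counted in
 * VALUEs, so 128 * 1024 * sizeof(VALUE) is 512 KB when sizeof(VALUE) == 4
 * (32-bit builds) and 1024 KB when sizeof(VALUE) == 8 (64-bit builds). */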
764
765#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
766/* Sanitizers seem to consume a LOT of machine stack space */
767#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
768#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
769#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
770#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
771#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
772#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
773#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
774#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
775#endif
776
777#ifndef VM_DEBUG_BP_CHECK
778#define VM_DEBUG_BP_CHECK 0
779#endif
780
781#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
782#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
783#endif
784
786 VALUE self;
787 const VALUE *ep;
788 union {
789 const rb_iseq_t *iseq;
790 const struct vm_ifunc *ifunc;
791 VALUE val;
792 } code;
793};
794
795enum rb_block_handler_type {
796 block_handler_type_iseq,
797 block_handler_type_ifunc,
798 block_handler_type_symbol,
799 block_handler_type_proc
800};
801
802enum rb_block_type {
803 block_type_iseq,
804 block_type_ifunc,
805 block_type_symbol,
806 block_type_proc
807};
808
809struct rb_block {
810 union {
811 struct rb_captured_block captured;
812 VALUE symbol;
813 VALUE proc;
814 } as;
815 enum rb_block_type type;
816};
817
819 const VALUE *pc; /* cfp[0] */
820 VALUE *sp; /* cfp[1] */
821 const rb_iseq_t *iseq; /* cfp[2] */
822 VALUE self; /* cfp[3] / block[0] */
823 const VALUE *ep; /* cfp[4] / block[1] */
824 const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc or forwarded block handler */
825 VALUE *__bp__; /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */
826
827#if VM_DEBUG_BP_CHECK
828 VALUE *bp_check; /* cfp[7] */
829#endif
830 // Return address for YJIT code
831 void *jit_return;
833
834extern const rb_data_type_t ruby_threadptr_data_type;
835
836static inline struct rb_thread_struct *
837rb_thread_ptr(VALUE thval)
838{
839 return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
840}
841
842enum rb_thread_status {
843 THREAD_RUNNABLE,
844 THREAD_STOPPED,
845 THREAD_STOPPED_FOREVER,
846 THREAD_KILLED
847};
848
849#ifdef RUBY_JMP_BUF
850typedef RUBY_JMP_BUF rb_jmpbuf_t;
851#else
852typedef void *rb_jmpbuf_t[5];
853#endif
854
855/*
856 the members which are written in EC_PUSH_TAG() should be placed at
857 the beginning and the end, so that the entire region is accessible.
858*/
859struct rb_vm_tag {
860 VALUE tag;
861 VALUE retval;
862 rb_jmpbuf_t buf;
863 struct rb_vm_tag *prev;
864 enum ruby_tag_type state;
865 unsigned int lock_rec;
866};
867
868STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
869STATIC_ASSERT(rb_vm_tag_buf_end,
870 offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
871 sizeof(struct rb_vm_tag));
872
875 void *arg;
876};
877
878struct rb_mutex_struct;
879
880typedef struct rb_ensure_entry {
881 VALUE marker;
882 VALUE (*e_proc)(VALUE);
883 VALUE data2;
885
886typedef struct rb_ensure_list {
887 struct rb_ensure_list *next;
888 struct rb_ensure_entry entry;
890
891typedef struct rb_fiber_struct rb_fiber_t;
892
894 struct rb_waiting_list *next;
895 struct rb_thread_struct *thread;
896 struct rb_fiber_struct *fiber;
897};
898
900 /* execution information */
901 VALUE *vm_stack; /* must free, must mark */
902 size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
904
905 struct rb_vm_tag *tag;
906
907 /* interrupt flags */
908 rb_atomic_t interrupt_flag;
909 rb_atomic_t interrupt_mask; /* size should match flag */
910#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
911 uint32_t checked_clock;
912#endif
913
914 rb_fiber_t *fiber_ptr;
915 struct rb_thread_struct *thread_ptr;
916
917 /* storage (ec (fiber) local) */
918 struct rb_id_table *local_storage;
919 VALUE local_storage_recursive_hash;
920 VALUE local_storage_recursive_hash_for_trace;
921
922 /* Inheritable fiber storage. */
923 VALUE storage;
924
925 /* eval env */
926 const VALUE *root_lep;
927 VALUE root_svar;
928
929 /* ensure & callcc */
930 rb_ensure_list_t *ensure_list;
931
932 /* trace information */
933 struct rb_trace_arg_struct *trace_arg;
934
935 /* temporary places */
936 VALUE errinfo;
937 VALUE passed_block_handler; /* for rb_iterate */
938
939 uint8_t raised_flag; /* only 3 bits needed */
940
941 /* n.b. only 7 bits needed, really: */
942 BITFIELD(enum method_missing_reason, method_missing_reason, 8);
943
944 VALUE private_const_reference;
945
946 /* for GC */
947 struct {
948 VALUE *stack_start;
949 VALUE *stack_end;
950 size_t stack_maxsize;
952 } machine;
953};
954
955#ifndef rb_execution_context_t
957#define rb_execution_context_t rb_execution_context_t
958#endif
959
960// for builtin.h
961#define VM_CORE_H_EC_DEFINED 1
962
963// Set the vm_stack pointer in the execution context.
964void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
965
966// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
967// @param ec the execution context to update.
968// @param stack a pointer to the stack to use.
969// @param size the size of the stack, as in `VALUE stack[size]`.
970void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
971
972// Clear (set to `NULL`) the vm_stack pointer.
973// @param ec the execution context to update.
974void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
975
977 bool ractor_safe;
978};
979
980typedef struct rb_ractor_struct rb_ractor_t;
981
982struct rb_native_thread;
983
984typedef struct rb_thread_struct {
985 struct ccan_list_node lt_node; // managed by a ractor
986 VALUE self;
987 rb_ractor_t *ractor;
988 rb_vm_t *vm;
989 struct rb_native_thread *nt;
991
992 struct rb_thread_sched_item sched;
993 rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
994
995 VALUE last_status; /* $? */
996
997 /* for cfunc */
998 struct rb_calling_info *calling;
999
1000 /* for load(true) */
1001 VALUE top_self;
1002 VALUE top_wrapper;
1003
1004 /* thread control */
1005
1006 BITFIELD(enum rb_thread_status, status, 2);
1007 /* bit flags */
1008 unsigned int locking_native_thread : 1;
1009 unsigned int to_kill : 1;
1010 unsigned int abort_on_exception: 1;
1011 unsigned int report_on_exception: 1;
1012 unsigned int pending_interrupt_queue_checked: 1;
1013 int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1014 uint32_t running_time_us; /* 12500..800000 */
1015
1016 void *blocking_region_buffer;
1017
1018 VALUE thgroup;
1019 VALUE value;
1020
1021 /* temporary place of retval on OPT_CALL_THREADED_CODE */
1022#if OPT_CALL_THREADED_CODE
1023 VALUE retval;
1024#endif
1025
1026 /* async errinfo queue */
1027 VALUE pending_interrupt_queue;
1028 VALUE pending_interrupt_mask_stack;
1029
1030 /* interrupt management */
1031 rb_nativethread_lock_t interrupt_lock;
1032 struct rb_unblock_callback unblock;
1033 VALUE locking_mutex;
1034 struct rb_mutex_struct *keeping_mutexes;
1035
1036 struct rb_waiting_list *join_list;
1037
1038 union {
1039 struct {
1040 VALUE proc;
1041 VALUE args;
1042 int kw_splat;
1043 } proc;
1044 struct {
1045 VALUE (*func)(void *);
1046 void *arg;
1047 } func;
1048 } invoke_arg;
1049
1050 enum thread_invoke_type {
1051 thread_invoke_type_none = 0,
1052 thread_invoke_type_proc,
1053 thread_invoke_type_ractor_proc,
1054 thread_invoke_type_func
1055 } invoke_type;
1056
1057 /* statistics data for profiler */
1058 VALUE stat_insn_usage;
1059
1060 /* fiber */
1061 rb_fiber_t *root_fiber;
1062
1063 VALUE scheduler;
1064 unsigned int blocking;
1065
1066 /* misc */
1067 VALUE name;
1068
1069 struct rb_ext_config ext_config;
1070} rb_thread_t;
1071
1072static inline unsigned int
1073rb_th_serial(const rb_thread_t *th)
1074{
1075 return (unsigned int)th->serial;
1076}
1077
1078typedef enum {
1079 VM_DEFINECLASS_TYPE_CLASS = 0x00,
1080 VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1081 VM_DEFINECLASS_TYPE_MODULE = 0x02,
1082 /* 0x03..0x06 is reserved */
1083 VM_DEFINECLASS_TYPE_MASK = 0x07
1084} rb_vm_defineclass_type_t;
1085
1086#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1087#define VM_DEFINECLASS_FLAG_SCOPED 0x08
1088#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1089#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1090#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1091 ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
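
/* Encoding sketch: the defineclass instruction packs the type and flags into a
 * single operand, e.g.
 *
 *   (VM_DEFINECLASS_TYPE_CLASS | VM_DEFINECLASS_FLAG_SCOPED |
 *    VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)              == 0x18
 *
 * would describe a definition like `class Foo::Bar < Baz` (a scoped constant
 * path plus an explicit superclass). */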
1092
1093/* iseq.c */
1094RUBY_SYMBOL_EXPORT_BEGIN
1095
1096/* node -> iseq */
1097rb_iseq_t *rb_iseq_new (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1098rb_iseq_t *rb_iseq_new_top (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1099rb_iseq_t *rb_iseq_new_main (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1100rb_iseq_t *rb_iseq_new_eval (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1101rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1102 enum rb_iseq_type, const rb_compile_option_t*);
1103
1104struct iseq_link_anchor;
1106 VALUE flags;
1107 VALUE reserved;
1108 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1109 const void *data;
1110};
1111static inline struct rb_iseq_new_with_callback_callback_func *
1112rb_iseq_new_with_callback_new_callback(
1113 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1114{
1115 VALUE memo = rb_imemo_new(imemo_ifunc, (VALUE)func, (VALUE)ptr, Qundef, Qfalse);
1116 return (struct rb_iseq_new_with_callback_callback_func *)memo;
1117}
1118rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1119 VALUE name, VALUE path, VALUE realpath, int first_lineno,
1120 const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1121
1122VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1123int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1124attr_index_t rb_estimate_iv_count(VALUE klass, const rb_iseq_t * initialize_iseq);
1125
1126VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1127
1128RUBY_EXTERN VALUE rb_cISeq;
1129RUBY_EXTERN VALUE rb_cRubyVM;
1130RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1131RUBY_EXTERN VALUE rb_block_param_proxy;
1132RUBY_SYMBOL_EXPORT_END
1133
1134#define GetProcPtr(obj, ptr) \
1135 GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1136
1137typedef struct {
1138 const struct rb_block block;
1139 unsigned int is_from_method: 1; /* bool */
1140 unsigned int is_lambda: 1; /* bool */
1141 unsigned int is_isolated: 1; /* bool */
1142} rb_proc_t;
1143
1144RUBY_SYMBOL_EXPORT_BEGIN
1145VALUE rb_proc_isolate(VALUE self);
1146VALUE rb_proc_isolate_bang(VALUE self);
1147VALUE rb_proc_ractor_make_shareable(VALUE self);
1148RUBY_SYMBOL_EXPORT_END
1149
1150typedef struct {
1151 VALUE flags; /* imemo header */
1152 rb_iseq_t *iseq;
1153 const VALUE *ep;
1154 const VALUE *env;
1155 unsigned int env_size;
1156} rb_env_t;
1157
1158extern const rb_data_type_t ruby_binding_data_type;
1159
1160#define GetBindingPtr(obj, ptr) \
1161 GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1162
1163typedef struct {
1164 const struct rb_block block;
1165 const VALUE pathobj;
1166 int first_lineno;
1167} rb_binding_t;
1168
1169/* used by compile time and send insn */
1170
1171enum vm_check_match_type {
1172 VM_CHECKMATCH_TYPE_WHEN = 1,
1173 VM_CHECKMATCH_TYPE_CASE = 2,
1174 VM_CHECKMATCH_TYPE_RESCUE = 3
1175};
1176
1177#define VM_CHECKMATCH_TYPE_MASK 0x03
1178#define VM_CHECKMATCH_ARRAY 0x04
1179
1180enum vm_special_object_type {
1181 VM_SPECIAL_OBJECT_VMCORE = 1,
1182 VM_SPECIAL_OBJECT_CBASE,
1183 VM_SPECIAL_OBJECT_CONST_BASE
1184};
1185
1186enum vm_svar_index {
1187 VM_SVAR_LASTLINE = 0, /* $_ */
1188 VM_SVAR_BACKREF = 1, /* $~ */
1189
1190 VM_SVAR_EXTRA_START = 2,
1191 VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1192};
1193
1194/* inline cache */
1195typedef struct iseq_inline_constant_cache *IC;
1196typedef struct iseq_inline_iv_cache_entry *IVC;
1197typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1198typedef union iseq_inline_storage_entry *ISE;
1199typedef const struct rb_callinfo *CALL_INFO;
1200typedef const struct rb_callcache *CALL_CACHE;
1201typedef struct rb_call_data *CALL_DATA;
1202
1203typedef VALUE CDHASH;
1204
1205#ifndef FUNC_FASTCALL
1206#define FUNC_FASTCALL(x) x
1207#endif
1208
1209typedef rb_control_frame_t *
1210 (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1211
1212#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1213#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1214
1215#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1216#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1217#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
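
/* Usage sketch: because pointers stored this way have their low bit set, the GC
 * sees them as Fixnums and does not try to mark through them.
 *
 *   VALUE guarded    = GC_GUARDED_PTR(prev_ep);      // prev_ep: some env pointer; tag bit 0x01 set
 *   const VALUE *ep0 = GC_GUARDED_PTR_REF(guarded);  // tag bits masked off again
 *   VM_ASSERT(GC_GUARDED_PTR_P(guarded));
 */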
1218
1219enum vm_frame_env_flags {
1220 /* Frame/Environment flag bits:
1221 * MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
1222 *
1223 *   X   : tag for GC marking (it makes the value look like a Fixnum to the GC)
1224 * EEE : 4 bits Env flags
1225 * FF..: 7 bits Frame flags
1226 * MM..: 15 bits frame magic (to check frame corruption)
1227 */
1228
1229 /* frame types */
1230 VM_FRAME_MAGIC_METHOD = 0x11110001,
1231 VM_FRAME_MAGIC_BLOCK = 0x22220001,
1232 VM_FRAME_MAGIC_CLASS = 0x33330001,
1233 VM_FRAME_MAGIC_TOP = 0x44440001,
1234 VM_FRAME_MAGIC_CFUNC = 0x55550001,
1235 VM_FRAME_MAGIC_IFUNC = 0x66660001,
1236 VM_FRAME_MAGIC_EVAL = 0x77770001,
1237 VM_FRAME_MAGIC_RESCUE = 0x78880001,
1238 VM_FRAME_MAGIC_DUMMY = 0x79990001,
1239
1240 VM_FRAME_MAGIC_MASK = 0x7fff0001,
1241
1242 /* frame flag */
1243 VM_FRAME_FLAG_FINISH = 0x0020,
1244 VM_FRAME_FLAG_BMETHOD = 0x0040,
1245 VM_FRAME_FLAG_CFRAME = 0x0080,
1246 VM_FRAME_FLAG_LAMBDA = 0x0100,
1247 VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1248 VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1249 VM_FRAME_FLAG_PASSED = 0x0800,
1250
1251 /* env flag */
1252 VM_ENV_FLAG_LOCAL = 0x0002,
1253 VM_ENV_FLAG_ESCAPED = 0x0004,
1254 VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1255 VM_ENV_FLAG_ISOLATED = 0x0010,
1256};
1257
1258#define VM_ENV_DATA_SIZE ( 3)
1259
1260#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1261#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1262#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1263#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1264
1265#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
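
/* Layout sketch of an environment around its ep (indices from the macros above):
 *
 *   ep[-2]  me/cref                   (VM_ENV_DATA_INDEX_ME_CREF)
 *   ep[-1]  specval: prev ep or
 *           block handler             (VM_ENV_DATA_INDEX_SPECVAL)
 *   ep[ 0]  flags                     (VM_ENV_DATA_INDEX_FLAGS)
 *   ep[ 1]  env object                (VM_ENV_DATA_INDEX_ENV, meaningful once escaped)
 *
 * Local variables sit below these three data slots; VM_ENV_INDEX_LAST_LVAR
 * (== -3) is the local slot closest to ep.
 */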
1266
1267static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1268
1269static inline void
1270VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1271{
1272 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1273 VM_ASSERT(FIXNUM_P(flags));
1274 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1275}
1276
1277static inline void
1278VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1279{
1280 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1281 VM_ASSERT(FIXNUM_P(flags));
1282 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1283}
1284
1285static inline unsigned long
1286VM_ENV_FLAGS(const VALUE *ep, long flag)
1287{
1288 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1289 VM_ASSERT(FIXNUM_P(flags));
1290 return flags & flag;
1291}
1292
1293static inline unsigned long
1294VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1295{
1296 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1297}
1298
1299static inline int
1300VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1301{
1302 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1303}
1304
1305static inline int
1306VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1307{
1308 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1309}
1310
1311static inline int
1312VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1313{
1314 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1315}
1316
1317static inline int
1318VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1319{
1320 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1321}
1322
1323static inline int
1324rb_obj_is_iseq(VALUE iseq)
1325{
1326 return imemo_type_p(iseq, imemo_iseq);
1327}
1328
1329#if VM_CHECK_MODE > 0
1330#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1331#endif
1332
1333static inline int
1334VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1335{
1336 int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1337 VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1338 (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1339 return cframe_p;
1340}
1341
1342static inline int
1343VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1344{
1345 return !VM_FRAME_CFRAME_P(cfp);
1346}
1347
1348#define RUBYVM_CFUNC_FRAME_P(cfp) \
1349 (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1350
1351#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1352#define VM_BLOCK_HANDLER_NONE 0
1353
1354static inline int
1355VM_ENV_LOCAL_P(const VALUE *ep)
1356{
1357 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1358}
1359
1360static inline const VALUE *
1361VM_ENV_PREV_EP(const VALUE *ep)
1362{
1363 VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1364 return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1365}
1366
1367static inline VALUE
1368VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1369{
1370 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1371 return ep[VM_ENV_DATA_INDEX_SPECVAL];
1372}
1373
1374#if VM_CHECK_MODE > 0
1375int rb_vm_ep_in_heap_p(const VALUE *ep);
1376#endif
1377
1378static inline int
1379VM_ENV_ESCAPED_P(const VALUE *ep)
1380{
1381 VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1382 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1383}
1384
1385#if VM_CHECK_MODE > 0
1386static inline int
1387vm_assert_env(VALUE obj)
1388{
1389 VM_ASSERT(imemo_type_p(obj, imemo_env));
1390 return 1;
1391}
1392#endif
1393
1395static inline VALUE
1396VM_ENV_ENVVAL(const VALUE *ep)
1397{
1398 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1399 VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1400 VM_ASSERT(vm_assert_env(envval));
1401 return envval;
1402}
1403
1405static inline const rb_env_t *
1406VM_ENV_ENVVAL_PTR(const VALUE *ep)
1407{
1408 return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1409}
1410
1411static inline const rb_env_t *
1412vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1413{
1414 rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
1415 env->env_size = env_size;
1416 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1417 return env;
1418}
1419
1420static inline void
1421VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1422{
1423 *((VALUE *)ptr) = v;
1424}
1425
1426static inline void
1427VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1428{
1429 VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1430 VM_FORCE_WRITE(ptr, special_const_value);
1431}
1432
1433static inline void
1434VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1435{
1436 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1437 VM_FORCE_WRITE(&ep[index], v);
1438}
1439
1440const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1441const VALUE *rb_vm_proc_local_ep(VALUE proc);
1442void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1443void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1444
1445VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1446
1447#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1448#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1449
1450#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1451 ((void *)(ecfp) > (void *)(cfp))
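
/* Direction note (sketch): control frames are carved out of the top of
 * ec->vm_stack and grow toward lower addresses, so pushing a frame decrements
 * cfp; that is why the caller ("previous") frame is at (cfp)+1 and a frame is
 * valid only while it lies at a lower address than RUBY_VM_END_CONTROL_FRAME(ec). */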
1452
1453static inline const rb_control_frame_t *
1454RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1455{
1456 return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1457}
1458
1459static inline int
1460RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1461{
1462 return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1463}
1464
1465static inline int
1466VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1467{
1468 if ((block_handler & 0x03) == 0x01) {
1469#if VM_CHECK_MODE > 0
1470 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1471 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1472#endif
1473 return 1;
1474 }
1475 else {
1476 return 0;
1477 }
1478}
1479
1480static inline VALUE
1481VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1482{
1483 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1484 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1485 return block_handler;
1486}
1487
1488static inline const struct rb_captured_block *
1489VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1490{
1491 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1492 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1493 return captured;
1494}
1495
1496static inline int
1497VM_BH_IFUNC_P(VALUE block_handler)
1498{
1499 if ((block_handler & 0x03) == 0x03) {
1500#if VM_CHECK_MODE > 0
1501 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1502 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1503#endif
1504 return 1;
1505 }
1506 else {
1507 return 0;
1508 }
1509}
1510
1511static inline VALUE
1512VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1513{
1514 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1515 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1516 return block_handler;
1517}
1518
1519static inline const struct rb_captured_block *
1520VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1521{
1522 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1523 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1524 return captured;
1525}
1526
1527static inline const struct rb_captured_block *
1528VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1529{
1530 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1531 VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1532 return captured;
1533}
1534
1535static inline enum rb_block_handler_type
1536vm_block_handler_type(VALUE block_handler)
1537{
1538 if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1539 return block_handler_type_iseq;
1540 }
1541 else if (VM_BH_IFUNC_P(block_handler)) {
1542 return block_handler_type_ifunc;
1543 }
1544 else if (SYMBOL_P(block_handler)) {
1545 return block_handler_type_symbol;
1546 }
1547 else {
1548 VM_ASSERT(rb_obj_is_proc(block_handler));
1549 return block_handler_type_proc;
1550 }
1551}
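
/* Encoding summary (derived from the predicates above): a block handler is a
 * single VALUE whose low two bits select its representation:
 *
 *   (bh & 0x03) == 0x01   tagged pointer to a captured iseq block
 *   (bh & 0x03) == 0x03   tagged pointer to a captured ifunc block
 *   otherwise             an ordinary Symbol or Proc object
 *
 * VM_BLOCK_HANDLER_NONE (0) means no block was given. */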
1552
1553static inline void
1554vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1555{
1556 VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1557 (vm_block_handler_type(block_handler), 1));
1558}
1559
1560static inline int
1561vm_cfp_forwarded_bh_p(const rb_control_frame_t *cfp, VALUE block_handler)
1562{
1563 return ((VALUE) cfp->block_code) == block_handler;
1564}
1565
1566static inline enum rb_block_type
1567vm_block_type(const struct rb_block *block)
1568{
1569#if VM_CHECK_MODE > 0
1570 switch (block->type) {
1571 case block_type_iseq:
1572 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1573 break;
1574 case block_type_ifunc:
1575 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1576 break;
1577 case block_type_symbol:
1578 VM_ASSERT(SYMBOL_P(block->as.symbol));
1579 break;
1580 case block_type_proc:
1581 VM_ASSERT(rb_obj_is_proc(block->as.proc));
1582 break;
1583 }
1584#endif
1585 return block->type;
1586}
1587
1588static inline void
1589vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1590{
1591 struct rb_block *mb = (struct rb_block *)block;
1592 mb->type = type;
1593}
1594
1595static inline const struct rb_block *
1596vm_proc_block(VALUE procval)
1597{
1598 VM_ASSERT(rb_obj_is_proc(procval));
1599 return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1600}
1601
1602static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1603static inline const VALUE *vm_block_ep(const struct rb_block *block);
1604
1605static inline const rb_iseq_t *
1606vm_proc_iseq(VALUE procval)
1607{
1608 return vm_block_iseq(vm_proc_block(procval));
1609}
1610
1611static inline const VALUE *
1612vm_proc_ep(VALUE procval)
1613{
1614 return vm_block_ep(vm_proc_block(procval));
1615}
1616
1617static inline const rb_iseq_t *
1618vm_block_iseq(const struct rb_block *block)
1619{
1620 switch (vm_block_type(block)) {
1621 case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1622 case block_type_proc: return vm_proc_iseq(block->as.proc);
1623 case block_type_ifunc:
1624 case block_type_symbol: return NULL;
1625 }
1626 VM_UNREACHABLE(vm_block_iseq);
1627 return NULL;
1628}
1629
1630static inline const VALUE *
1631vm_block_ep(const struct rb_block *block)
1632{
1633 switch (vm_block_type(block)) {
1634 case block_type_iseq:
1635 case block_type_ifunc: return block->as.captured.ep;
1636 case block_type_proc: return vm_proc_ep(block->as.proc);
1637 case block_type_symbol: return NULL;
1638 }
1639 VM_UNREACHABLE(vm_block_ep);
1640 return NULL;
1641}
1642
1643static inline VALUE
1644vm_block_self(const struct rb_block *block)
1645{
1646 switch (vm_block_type(block)) {
1647 case block_type_iseq:
1648 case block_type_ifunc:
1649 return block->as.captured.self;
1650 case block_type_proc:
1651 return vm_block_self(vm_proc_block(block->as.proc));
1652 case block_type_symbol:
1653 return Qundef;
1654 }
1655 VM_UNREACHABLE(vm_block_self);
1656 return Qundef;
1657}
1658
1659static inline VALUE
1660VM_BH_TO_SYMBOL(VALUE block_handler)
1661{
1662 VM_ASSERT(SYMBOL_P(block_handler));
1663 return block_handler;
1664}
1665
1666static inline VALUE
1667VM_BH_FROM_SYMBOL(VALUE symbol)
1668{
1669 VM_ASSERT(SYMBOL_P(symbol));
1670 return symbol;
1671}
1672
1673static inline VALUE
1674VM_BH_TO_PROC(VALUE block_handler)
1675{
1676 VM_ASSERT(rb_obj_is_proc(block_handler));
1677 return block_handler;
1678}
1679
1680static inline VALUE
1681VM_BH_FROM_PROC(VALUE procval)
1682{
1683 VM_ASSERT(rb_obj_is_proc(procval));
1684 return procval;
1685}
1686
1687/* VM related object allocate functions */
1688VALUE rb_thread_alloc(VALUE klass);
1689VALUE rb_binding_alloc(VALUE klass);
1690VALUE rb_proc_alloc(VALUE klass);
1691VALUE rb_proc_dup(VALUE self);
1692
1693/* for debug */
1694extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1695extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
1696extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp
1697#if OPT_STACK_CACHING
1698 , VALUE reg_a, VALUE reg_b
1699#endif
1700);
1701
1702#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
1703#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
1704void rb_vm_bugreport(const void *);
1705typedef void (*ruby_sighandler_t)(int);
1706RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1707NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1708
1709/* functions about thread/vm execution */
1710RUBY_SYMBOL_EXPORT_BEGIN
1711VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1712VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1713VALUE rb_iseq_path(const rb_iseq_t *iseq);
1714VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1715RUBY_SYMBOL_EXPORT_END
1716
1717VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1718void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1719
1720int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1721void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1722
1723VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1724
1725VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1726static inline VALUE
1727rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1728{
1729 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1730}
1731
1732static inline VALUE
1733rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1734{
1735 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1736}
1737
1738VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1739VALUE rb_vm_env_local_variables(const rb_env_t *env);
1740const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1741const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1742void rb_vm_inc_const_missing_count(void);
1743VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1744 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1745void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1746MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
1747
1748void rb_thread_start_timer_thread(void);
1749void rb_thread_stop_timer_thread(void);
1750void rb_thread_reset_timer_thread(void);
1751void rb_thread_wakeup_timer_thread(int);
1752
1753static inline void
1754rb_vm_living_threads_init(rb_vm_t *vm)
1755{
1756 ccan_list_head_init(&vm->waiting_fds);
1757 ccan_list_head_init(&vm->waiting_pids);
1758 ccan_list_head_init(&vm->workqueue);
1759 ccan_list_head_init(&vm->waiting_grps);
1760 ccan_list_head_init(&vm->ractor.set);
1761}
1762
1763typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1764rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1765rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1766int rb_vm_get_sourceline(const rb_control_frame_t *);
1767void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1768void ruby_thread_init_stack(rb_thread_t *th);
1769rb_thread_t * ruby_thread_from_native(void);
1770int ruby_thread_set_native(rb_thread_t *th);
1771int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1772void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1773MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1774
1775void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1776
1777#define rb_vm_register_special_exception(sp, e, m) \
1778 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1779
1780void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);
1781
1782void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1783
1784MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1785
1786#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1787
1788#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1789 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1790 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1791 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1792 if (UNLIKELY((cfp) <= &bound[1])) { \
1793 vm_stackoverflow(); \
1794 } \
1795} while (0)
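
/* Sketch of the check above: sp (the value stack) grows upward while cfp (the
 * control frames) grows downward on the same vm_stack, so once &sp[margin]
 * would reach the frame region the two are about to collide and
 * vm_stackoverflow() raises a stack overflow error (cf. sysstack_error above). */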
1796
1797#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1798 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
1799
1800VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1801
1802rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
1803
1804/* for thread */
1805
1806#if RUBY_VM_THREAD_MODEL == 2
1807MJIT_SYMBOL_EXPORT_BEGIN
1808
1809RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
1810RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1811RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1812RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1813RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1814
1815MJIT_SYMBOL_EXPORT_END
1816
1817#define GET_VM() rb_current_vm()
1818#define GET_RACTOR() rb_current_ractor()
1819#define GET_THREAD() rb_current_thread()
1820#define GET_EC() rb_current_execution_context(true)
1821
1822static inline rb_thread_t *
1823rb_ec_thread_ptr(const rb_execution_context_t *ec)
1824{
1825 return ec->thread_ptr;
1826}
1827
1828static inline rb_ractor_t *
1829rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1830{
1831 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1832 if (th) {
1833 VM_ASSERT(th->ractor != NULL);
1834 return th->ractor;
1835 }
1836 else {
1837 return NULL;
1838 }
1839}
1840
1841static inline rb_vm_t *
1842rb_ec_vm_ptr(const rb_execution_context_t *ec)
1843{
1844 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1845 if (th) {
1846 return th->vm;
1847 }
1848 else {
1849 return NULL;
1850 }
1851}
1852
1853static inline rb_execution_context_t *
1854rb_current_execution_context(bool expect_ec)
1855{
1856#ifdef RB_THREAD_LOCAL_SPECIFIER
1857 #ifdef __APPLE__
1858 rb_execution_context_t *ec = rb_current_ec();
1859 #else
1860 rb_execution_context_t *ec = ruby_current_ec;
1861 #endif
1862#else
1863 rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
1864#endif
1865 VM_ASSERT(!expect_ec || ec != NULL);
1866 return ec;
1867}
1868
1869static inline rb_thread_t *
1870rb_current_thread(void)
1871{
1872 const rb_execution_context_t *ec = GET_EC();
1873 return rb_ec_thread_ptr(ec);
1874}
1875
1876static inline rb_ractor_t *
1877rb_current_ractor(void)
1878{
1879 if (ruby_single_main_ractor) {
1880 return ruby_single_main_ractor;
1881 }
1882 else {
1883 const rb_execution_context_t *ec = GET_EC();
1884 return rb_ec_ractor_ptr(ec);
1885 }
1886}
1887
1888static inline rb_vm_t *
1889rb_current_vm(void)
1890{
1891#if 0 // TODO: reconsider the assertions
1892 VM_ASSERT(ruby_current_vm_ptr == NULL ||
1893 ruby_current_execution_context_ptr == NULL ||
1894 rb_ec_thread_ptr(GET_EC()) == NULL ||
1895 rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
1896 rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
1897#endif
1898
1899 return ruby_current_vm_ptr;
1900}
1901
1902void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
1903 unsigned int recorded_lock_rec,
1904 unsigned int current_lock_rec);
1905
1906static inline unsigned int
1907rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
1908{
1909 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1910
1911 if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
1912 return 0;
1913 }
1914 else {
1915 return vm->ractor.sync.lock_rec;
1916 }
1917}
1918
1919#else
1920#error "unsupported thread model"
1921#endif
1922
1923enum {
1924 TIMER_INTERRUPT_MASK = 0x01,
1925 PENDING_INTERRUPT_MASK = 0x02,
1926 POSTPONED_JOB_INTERRUPT_MASK = 0x04,
1927 TRAP_INTERRUPT_MASK = 0x08,
1928 TERMINATE_INTERRUPT_MASK = 0x10,
1929 VM_BARRIER_INTERRUPT_MASK = 0x20,
1930};
1931
1932#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
1933#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
1934#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
1935#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
1936#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
1937#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
1938#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
1939 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
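
/* Usage sketch: another thread or a signal handler requests work by atomically
 * setting a bit, e.g.
 *
 *   RUBY_VM_SET_TRAP_INTERRUPT(target_ec);  // target_ec: the target thread's execution context
 *
 * and the target thread notices it at its next RUBY_VM_CHECK_INTS() /
 * rb_threadptr_execute_interrupts() call, unless that bit is currently masked
 * out via interrupt_mask. */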
1940
1941static inline bool
1942RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
1943{
1944#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1945 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
1946
1947 if (current_clock != ec->checked_clock) {
1948 ec->checked_clock = current_clock;
1949 RUBY_VM_SET_TIMER_INTERRUPT(ec);
1950 }
1951#endif
1952 return ec->interrupt_flag & ~(ec)->interrupt_mask;
1953}
1954
1955VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
1956int rb_signal_buff_size(void);
1957int rb_signal_exec(rb_thread_t *th, int sig);
1958void rb_threadptr_check_signal(rb_thread_t *mth);
1959void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
1960void rb_threadptr_signal_exit(rb_thread_t *th);
1961int rb_threadptr_execute_interrupts(rb_thread_t *, int);
1962void rb_threadptr_interrupt(rb_thread_t *th);
1963void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
1964void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
1965void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
1966VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
1967void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
1968void rb_execution_context_update(rb_execution_context_t *ec);
1969void rb_execution_context_mark(const rb_execution_context_t *ec);
1970void rb_fiber_close(rb_fiber_t *fib);
1971void Init_native_thread(rb_thread_t *th);
1972int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
1973
1974// vm_sync.h
1975void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
1976void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
1977
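/* Polls `ec` for pending interrupts and dispatches them through
 * rb_threadptr_execute_interrupts() when any are set. A typical call site
 * (illustrative sketch only; do_one_step() is not a real function) would be:
 *
 *     while (do_one_step()) {
 *         RUBY_VM_CHECK_INTS(ec);  // let timers, signals and traps run
 *     }
 */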
1978#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
1979static inline void
1980rb_vm_check_ints(rb_execution_context_t *ec)
1981{
1982#ifdef RUBY_ASSERT_CRITICAL_SECTION
1983 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1984#endif
1985
1986 VM_ASSERT(ec == GET_EC());
1987
1988 if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
1989 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
1990 }
1991}
1992
1993/* tracer */
1994
1995struct rb_trace_arg_struct {
1996 rb_event_flag_t event;
1997 rb_execution_context_t *ec;
1998 const rb_control_frame_t *cfp;
1999 VALUE self;
2000 ID id;
2001 ID called_id;
2002 VALUE klass;
2003 VALUE data;
2004
2005 int klass_solved;
2006
2007 /* calc from cfp */
2008 int lineno;
2009 VALUE path;
2010};
2011
2012void rb_hook_list_mark(rb_hook_list_t *hooks);
2013void rb_hook_list_free(rb_hook_list_t *hooks);
2014void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2015void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2016
2017void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2018
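/* Fires the hooks registered in `hooks_` that listen for `flag_`. The bitmask
 * test comes first so the remaining arguments are only evaluated when at least
 * one matching hook is installed. */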
2019#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2020 const rb_event_flag_t flag_arg_ = (flag_); \
2021 rb_hook_list_t *hooks_arg_ = (hooks_); \
2022 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2023 /* defer evaluating the other arguments */ \
2024 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2025 } \
2026} while (0)
2027
2028static inline void
2029rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2030 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2031{
2032 struct rb_trace_arg_struct trace_arg;
2033
2034 VM_ASSERT((hooks->events & flag) != 0);
2035
2036 trace_arg.event = flag;
2037 trace_arg.ec = ec;
2038 trace_arg.cfp = ec->cfp;
2039 trace_arg.self = self;
2040 trace_arg.id = id;
2041 trace_arg.called_id = called_id;
2042 trace_arg.klass = klass;
2043 trace_arg.data = data;
2044 trace_arg.path = Qundef;
2045 trace_arg.klass_solved = 0;
2046
2047 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2048}
2049
2050struct rb_ractor_pub {
2051 VALUE self;
2052 uint32_t id;
2053 rb_hook_list_t hooks;
2054};
2055
2056static inline rb_hook_list_t *
2057rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2058{
2059 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2060 return &cr_pub->hooks;
2061}
2062
2063#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2064 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2065
2066#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2067 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
2068
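/* Emits RUBY_EVENT_SCRIPT_COMPILED for `iseq`; the event data is the iseq itself,
 * or a two-element array of [eval_script, iseq] when an eval source is given. */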
2069static inline void
2070rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2071{
2072 EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2073 NIL_P(eval_script) ? (VALUE)iseq :
2074 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2075}
2076
2077void rb_vm_trap_exit(rb_vm_t *vm);
2078
2079RUBY_SYMBOL_EXPORT_BEGIN
2080
2081int rb_thread_check_trap_pending(void);
2082
2083/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2084#define RUBY_EVENT_COVERAGE_LINE 0x010000
2085#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2086
2087extern VALUE rb_get_coverages(void);
2088extern void rb_set_coverages(VALUE, int, VALUE);
2089extern void rb_clear_coverages(void);
2090extern void rb_reset_coverages(void);
2091extern void rb_resume_coverages(void);
2092extern void rb_suspend_coverages(void);
2093
2094void rb_postponed_job_flush(rb_vm_t *vm);
2095
2096// ractor.c
2097RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2098RUBY_EXTERN VALUE rb_eRactorIsolationError;
2099
2100RUBY_SYMBOL_EXPORT_END
2101
2102#endif /* RUBY_VM_CORE_H */