Ruby 2.7.2p137 (2020-10-01 revision 5445e0435260b449decf2ac16f9d09bae3cafe72)
cont.c
1 /**********************************************************************
2 
3  cont.c -
4 
5  $Author$
6  created at: Thu May 23 09:03:43 2007
7 
8  Copyright (C) 2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #include "internal.h"
13 #include "vm_core.h"
14 #include "gc.h"
15 #include "eval_intern.h"
16 #include "mjit.h"
17 
18 #include COROUTINE_H
19 
20 #ifndef _WIN32
21 #include <unistd.h>
22 #include <sys/mman.h>
23 #endif
24 
25 static const int DEBUG = 0;
26 
27 #define RB_PAGE_SIZE (pagesize)
28 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
29 static long pagesize;
30 
31 static const rb_data_type_t cont_data_type, fiber_data_type;
32 static VALUE rb_cContinuation;
33 static VALUE rb_cFiber;
34 static VALUE rb_eFiberError;
35 #ifdef RB_EXPERIMENTAL_FIBER_POOL
36 static VALUE rb_cFiberPool;
37 #endif
38 
39 #define CAPTURE_JUST_VALID_VM_STACK 1
40 
41 // Defined in `coroutine/$arch/Context.h`:
42 #ifdef COROUTINE_LIMITED_ADDRESS_SPACE
43 #define FIBER_POOL_ALLOCATION_FREE
44 #define FIBER_POOL_INITIAL_SIZE 8
45 #define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
46 #else
47 #define FIBER_POOL_INITIAL_SIZE 32
48 #define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
49 #endif
50 
51 enum context_type {
52     CONTINUATION_CONTEXT = 0,
53     FIBER_CONTEXT = 1
54 };
55 
56 struct cont_saved_vm_stack {
57     VALUE *ptr;
58 #ifdef CAPTURE_JUST_VALID_VM_STACK
59     size_t slen;  /* length of stack (head of ec->vm_stack) */
60     size_t clen;  /* length of control frames (tail of ec->vm_stack) */
61 #endif
62 };
63 
64 struct fiber_pool;
65 
66 // Represents a single stack.
67 struct fiber_pool_stack {
68     // A pointer to the memory allocation (lowest address) for the stack.
69     void * base;
70 
71     // The current stack pointer, taking into account the direction of the stack.
72     void * current;
73 
74     // The size of the stack excluding any guard pages.
75     size_t size;
76 
77     // The available stack capacity w.r.t. the current stack offset.
78     size_t available;
79 
80     // The pool this stack should be allocated from.
81     struct fiber_pool * pool;
82 
83     // If the stack is allocated, the allocation it came from.
84     struct fiber_pool_allocation * allocation;
85 };
86 
87 // A linked list of vacant (unused) stacks.
88 // This structure is stored in the first page of a stack if it is not in use.
89 // @sa fiber_pool_vacancy_pointer
90 struct fiber_pool_vacancy {
91     // Details about the vacant stack:
92     struct fiber_pool_stack stack;
93 
94     // The vacancy linked list.
95 #ifdef FIBER_POOL_ALLOCATION_FREE
96     struct fiber_pool_vacancy * previous;
97 #endif
98     struct fiber_pool_vacancy * next;
99 };
100 
101 // Manages a singly linked list of mapped regions of memory, each of which contains one or more stacks:
102 //
103 //     base = +-------------------------------+-----------------------+  +
104 //            |VM Stack                       |VM Stack               |  |
105 //            |                               |                       |  |
106 //            |                               |                       |  |
107 //            +-------------------------------+                       |  |
108 //            |Machine Stack                  |Machine Stack          |  |
109 //            |                               |                       |  |
110 //            |                               |                       |  |
111 //            |                               |    .  .  .  .         |  |  size
112 //            |                               |                       |  |
113 //            |                               |                       |  |
114 //            |                               |                       |  |
115 //            |                               |                       |  |
116 //            |                               |                       |  |
117 //            +-------------------------------+                       |  |
118 //            |Guard Page                     |Guard Page             |  |
119 //            +-------------------------------+-----------------------+  v
120 //
121 //            +------------------------------------------------------->
122 //
123 //                                    count
124 //
125 struct fiber_pool_allocation {
126     // A pointer to the memory mapped region.
127     void * base;
128 
129     // The size of the individual stacks.
130     size_t size;
131 
132     // The stride of individual stacks (including any guard pages or other accounting details).
133     size_t stride;
134 
135     // The number of stacks that were allocated.
136     size_t count;
137 
138 #ifdef FIBER_POOL_ALLOCATION_FREE
139     // The number of stacks used in this allocation.
140     size_t used;
141 #endif
142 
143     struct fiber_pool * pool;
144 
145     // The allocation linked list.
146 #ifdef FIBER_POOL_ALLOCATION_FREE
147     struct fiber_pool_allocation * previous;
148 #endif
149     struct fiber_pool_allocation * next;
150 };
151 
152 // A fiber pool manages vacant stacks to reduce the overhead of creating fibers.
153 struct fiber_pool {
154     // A singly-linked list of allocations which contain 1 or more stacks each.
155     struct fiber_pool_allocation * allocations;
156 
157     // Provides O(1) stack "allocation":
158     struct fiber_pool_vacancy * vacancies;
159 
160     // The size of the stack allocations (excluding any guard page).
161     size_t size;
162 
163     // The total number of stacks that have been allocated in this pool.
164     size_t count;
165 
166     // The initial number of stacks to allocate.
167     size_t initial_count;
168 
169     // Whether to madvise(free) the stack or not:
170     int free_stacks;
171 
172     // The number of stacks that have been used in this pool.
173     size_t used;
174 
175     // The amount to allocate for the vm_stack:
176     size_t vm_stack_size;
177 };
178 
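// A minimal usage sketch of the pool lifecycle, using the helpers defined
// below (fiber_pool_initialize, fiber_pool_stack_acquire,
// fiber_pool_stack_release). The sizes here are illustrative assumptions
// only, not the values the VM actually uses.
#if 0
static void
fiber_pool_usage_sketch(void)
{
    struct fiber_pool pool;

    // 128 KiB stacks, 8 allocated up front, 16 KiB of each reserved for the vm_stack:
    fiber_pool_initialize(&pool, 128 * 1024, 8, 16 * 1024);

    // O(1) pop from the vacancy list (expands the pool when it is empty):
    struct fiber_pool_stack stack = fiber_pool_stack_acquire(&pool);

    // O(1) push back onto the vacancy list:
    fiber_pool_stack_release(&stack);
}
#endif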
179 typedef struct rb_context_struct {
180     enum context_type type;
181     int argc;
182     int kw_splat;
183     VALUE self;
184     VALUE value;
185 
186     struct cont_saved_vm_stack saved_vm_stack;
187 
188     struct {
189         VALUE *stack;
190         VALUE *stack_src;
191         size_t stack_size;
192     } machine;
193     rb_execution_context_t saved_ec;
194     rb_jmpbuf_t jmpbuf;
195     rb_ensure_entry_t *ensure_array;
196     /* Pointer to MJIT info about the continuation. */
197     struct mjit_cont *mjit_cont;
198 } rb_context_t;
199 
200 
201 /*
202  * Fiber status:
203  *    [Fiber.new] ------> FIBER_CREATED
204  *                        | [Fiber#resume]
205  *                        v
206  *                   +--> FIBER_RESUMED ----+
207  *    [Fiber#resume] |    | [Fiber.yield]   |
208  *                   |    v                 |
209  *                   +-- FIBER_SUSPENDED    | [Terminate]
210  *                                          |
211  *                       FIBER_TERMINATED <-+
212  */
213 enum fiber_status {
214     FIBER_CREATED,
215     FIBER_RESUMED,
216     FIBER_SUSPENDED,
217     FIBER_TERMINATED
218 };
219 
220 #define FIBER_CREATED_P(fiber) ((fiber)->status == FIBER_CREATED)
221 #define FIBER_RESUMED_P(fiber) ((fiber)->status == FIBER_RESUMED)
222 #define FIBER_SUSPENDED_P(fiber) ((fiber)->status == FIBER_SUSPENDED)
223 #define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
224 #define FIBER_RUNNABLE_P(fiber) (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))
225 
226 struct rb_fiber_struct {
227     rb_context_t cont;
228     VALUE first_proc;
229     struct rb_fiber_struct *prev;
230     BITFIELD(enum fiber_status, status, 2);
231     /* If a fiber invokes "transfer",
232      * then it can't be invoked by "resume" any more after that.
233      * You shouldn't mix "transfer" and "resume".
234      */
235     unsigned int transferred : 1;
236 
237     struct coroutine_context context;
238     struct fiber_pool_stack stack;
239 };
240 
241 static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};
242 
243 /*
244  * FreeBSD requires that the first (i.e. addr) argument of mmap(2) be non-NULL
245  * if MAP_STACK is passed.
246  * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
247  */
248 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
249 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
250 #else
251 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
252 #endif
253 
254 #define ERRNOMSG strerror(errno)
255 
256 // Locates the stack vacancy details for the given stack.
257 // Requires that fiber_pool_vacancy fits within one page.
258 inline static struct fiber_pool_vacancy *
259 fiber_pool_vacancy_pointer(void * base, size_t size)
260 {
261     STACK_GROW_DIR_DETECTION;
262 
263  return (struct fiber_pool_vacancy *)(
264  (char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)
265  );
266 }
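// Worked example, assuming a 4 KiB page, a 64 KiB stack, and a machine stack
// that grows downward (so STACK_DIR_UPPER selects its second argument):
//
//     base = 0x100000, size = 0x10000
//     vacancy = (struct fiber_pool_vacancy *)(base + size - RB_PAGE_SIZE)
//             = 0x10F000
//
// i.e. the vacancy record occupies the highest page of the stack region,
// which is the last part of a downward-growing stack to be touched.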
267 
268 // Reset the current stack pointer and available size of the given stack.
269 inline static void
270 fiber_pool_stack_reset(struct fiber_pool_stack * stack)
271 {
272     STACK_GROW_DIR_DETECTION;
273 
274  stack->current = (char*)stack->base + STACK_DIR_UPPER(0, stack->size);
275     stack->available = stack->size;
276 }
277 
278 // A pointer to the base of the current unused portion of the stack.
279 inline static void *
280 fiber_pool_stack_base(struct fiber_pool_stack * stack)
281 {
282     STACK_GROW_DIR_DETECTION;
283 
284     VM_ASSERT(stack->current);
285 
286     return STACK_DIR_UPPER(stack->base, (char*)stack->base + stack->available);
287 }
288 
289 // Allocate some memory from the stack. Used to allocate vm_stack inline with machine stack.
290 // @sa fiber_initialize_coroutine
291 inline static void *
292 fiber_pool_stack_alloca(struct fiber_pool_stack * stack, size_t offset)
293 {
294     STACK_GROW_DIR_DETECTION;
295 
296  if (DEBUG) fprintf(stderr, "fiber_pool_stack_alloca(%p): %"PRIuSIZE"/%"PRIuSIZE"\n", (void*)stack, offset, stack->available);
297  VM_ASSERT(stack->available >= offset);
298 
299  // The pointer to the memory being allocated:
300  void * pointer = STACK_DIR_UPPER(stack->current, (char*)stack->current - offset);
301 
302  // Move the stack pointer:
303  stack->current = STACK_DIR_UPPER((char*)stack->current + offset, (char*)stack->current - offset);
304  stack->available -= offset;
305 
306  return pointer;
307 }
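// Worked example for a downward-growing stack, with illustrative numbers:
//
//     before: current = base + 0x10000, available = 0xF000
//     fiber_pool_stack_alloca(stack, 0x4000)
//         returns current - 0x4000 (the allocation grows down from `current`)
//     after:  current = base + 0xC000, available = 0xB000
//
// This is how the vm_stack is carved out of the top of the machine stack
// region in fiber_initialize_coroutine below.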
308 
309 // Reset the current stack pointer and available size of the given stack.
310 inline static void
311 fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
312 {
313  fiber_pool_stack_reset(&vacancy->stack);
314 
315  // Consume one page of the stack because it's used for the vacancy list:
316  fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);
317 }
318 
319 inline static struct fiber_pool_vacancy *
320 fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
321 {
322  vacancy->next = head;
323 
324 #ifdef FIBER_POOL_ALLOCATION_FREE
325  if (head) {
326  head->previous = vacancy;
327  }
328 #endif
329 
330  return vacancy;
331 }
332 
333 #ifdef FIBER_POOL_ALLOCATION_FREE
334 static void
335 fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
336 {
337  if (vacancy->next) {
338  vacancy->next->previous = vacancy->previous;
339  }
340 
341  if (vacancy->previous) {
342  vacancy->previous->next = vacancy->next;
343  }
344  else {
345  // It's the head of the list:
346  vacancy->stack.pool->vacancies = vacancy->next;
347  }
348 }
349 
350 inline static struct fiber_pool_vacancy *
351 fiber_pool_vacancy_pop(struct fiber_pool * pool)
352 {
353  struct fiber_pool_vacancy * vacancy = pool->vacancies;
354 
355  if (vacancy) {
356  fiber_pool_vacancy_remove(vacancy);
357  }
358 
359  return vacancy;
360 }
361 #else
362 inline static struct fiber_pool_vacancy *
363 fiber_pool_vacancy_pop(struct fiber_pool * pool)
364 {
365  struct fiber_pool_vacancy * vacancy = pool->vacancies;
366 
367  if (vacancy) {
368  pool->vacancies = vacancy->next;
369  }
370 
371  return vacancy;
372 }
373 #endif
374 
375 // Initialize the vacant stack. The [base, size] allocation should not include the guard page.
376 // @param base The pointer to the lowest address of the allocated memory.
377 // @param size The size of the allocated memory.
378 inline static struct fiber_pool_vacancy *
379 fiber_pool_vacancy_initialize(struct fiber_pool * fiber_pool, struct fiber_pool_vacancy * vacancies, void * base, size_t size)
380 {
381  struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, size);
382 
383  vacancy->stack.base = base;
384  vacancy->stack.size = size;
385 
386  fiber_pool_vacancy_reset(vacancy);
387 
388  vacancy->stack.pool = fiber_pool;
389 
390  return fiber_pool_vacancy_push(vacancy, vacancies);
391 }
392 
393 // Allocate a maximum of count stacks, size given by stride.
394 // @param count the number of stacks to allocate / were allocated.
395 // @param stride the size of the individual stacks.
396 // @return [void *] the allocated memory or NULL if allocation failed.
397 inline static void *
398 fiber_pool_allocate_memory(size_t * count, size_t stride)
399 {
400  // We use a divide-by-2 strategy to try and allocate memory. We are trying
401     // to allocate `count` stacks. In normal situations, this won't fail. But
402  // if we ran out of address space, or we are allocating more memory than
403  // the system would allow (e.g. overcommit * physical memory + swap), we
404  // divide count by two and try again. This condition should only be
405  // encountered in edge cases, but we handle it here gracefully.
406  while (*count > 1) {
407 #if defined(_WIN32)
408  void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);
409 
410  if (!base) {
411  *count = (*count) >> 1;
412  }
413  else {
414  return base;
415  }
416 #else
417  errno = 0;
418  void * base = mmap(NULL, (*count)*stride, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
419 
420  if (base == MAP_FAILED) {
421  // If the allocation fails, count = count / 2, and try again.
422  *count = (*count) >> 1;
423  }
424  else {
425  return base;
426  }
427 #endif
428  }
429 
430  return NULL;
431 }
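// Call-site sketch (illustrative): the function may succeed with fewer
// stacks than requested, so the caller must re-read `count` afterwards.
#if 0
size_t count = 32;                    // ask for up to 32 stacks
size_t stride = size + RB_PAGE_SIZE;  // one guard page per stack, as in fiber_pool_expand
void * base = fiber_pool_allocate_memory(&count, stride);

if (base) {
    // `count` now holds the number of stacks that actually fit in the mapping;
    // it may be any power-of-two fraction of the original request, down to 2.
}
#endif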
432 
433 // Given an existing fiber pool, expand it by the specified number of stacks.
434 // @param count the maximum number of stacks to allocate.
435 // @return the allocated fiber pool.
436 // @sa fiber_pool_allocation_free
437 static struct fiber_pool_allocation *
438 fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
439 {
440     STACK_GROW_DIR_DETECTION;
441 
442  size_t size = fiber_pool->size;
443  size_t stride = size + RB_PAGE_SIZE;
444 
445  // Allocate the memory required for the stacks:
446  void * base = fiber_pool_allocate_memory(&count, stride);
447 
448  if (base == NULL) {
449  rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);
450  }
451 
452  struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
453  struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);
454 
455  // Initialize fiber pool allocation:
456  allocation->base = base;
457  allocation->size = size;
458  allocation->stride = stride;
459  allocation->count = count;
460 #ifdef FIBER_POOL_ALLOCATION_FREE
461  allocation->used = 0;
462 #endif
463  allocation->pool = fiber_pool;
464 
465  if (DEBUG) {
466  fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
467             count, (void*)fiber_pool, fiber_pool->used, fiber_pool->count, size, fiber_pool->vm_stack_size);
468     }
469 
470  // Iterate over all stacks, initializing the vacancy list:
471  for (size_t i = 0; i < count; i += 1) {
472  void * base = (char*)allocation->base + (stride * i);
473  void * page = (char*)base + STACK_DIR_UPPER(size, 0);
474 
475 #if defined(_WIN32)
476  DWORD old_protect;
477 
478  if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
479  VirtualFree(allocation->base, 0, MEM_RELEASE);
480  rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
481  }
482 #else
483  if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
484  munmap(allocation->base, count*stride);
485  rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
486  }
487 #endif
488 
489  vacancies = fiber_pool_vacancy_initialize(
490  fiber_pool, vacancies,
491  (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
492  size
493  );
494 
495 #ifdef FIBER_POOL_ALLOCATION_FREE
496  vacancies->stack.allocation = allocation;
497 #endif
498  }
499 
500  // Insert the allocation into the head of the pool:
501  allocation->next = fiber_pool->allocations;
502 
503 #ifdef FIBER_POOL_ALLOCATION_FREE
504  if (allocation->next) {
505  allocation->next->previous = allocation;
506  }
507 
508  allocation->previous = NULL;
509 #endif
510 
511  fiber_pool->allocations = allocation;
512  fiber_pool->vacancies = vacancies;
513  fiber_pool->count += count;
514 
515  return allocation;
516 }
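// Layout sketch of one expanded allocation (stride = size + RB_PAGE_SIZE).
// For the common downward-growing machine stack, the guard page sits at the
// low end of each slot, i.e. in the direction of growth, so overflowing a
// fiber's stack faults instead of silently corrupting its neighbour:
//
//     base -> [guard | stack 0][guard | stack 1] ... [guard | stack count-1]
//             '------ stride ------'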
517 
518 // Initialize the specified fiber pool with the given number of stacks.
519 // @param vm_stack_size The size of the vm stack to allocate.
520 static void
521 fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t count, size_t vm_stack_size)
522 {
523  VM_ASSERT(vm_stack_size < size);
524 
525     fiber_pool->allocations = NULL;
526     fiber_pool->vacancies = NULL;
527     fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;
528     fiber_pool->count = 0;
529     fiber_pool->initial_count = count;
530     fiber_pool->free_stacks = 1;
531  fiber_pool->used = 0;
532 
533  fiber_pool->vm_stack_size = vm_stack_size;
534 
535  fiber_pool_expand(fiber_pool, count);
536 }
537 
538 #ifdef FIBER_POOL_ALLOCATION_FREE
539 // Free the list of fiber pool allocations.
540 static void
541 fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
542 {
543     STACK_GROW_DIR_DETECTION;
544 
545  VM_ASSERT(allocation->used == 0);
546 
547  if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", allocation, allocation->base, allocation->count);
548 
549  size_t i;
550  for (i = 0; i < allocation->count; i += 1) {
551  void * base = (char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);
552 
553  struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);
554 
555  // Pop the vacant stack off the free list:
556  fiber_pool_vacancy_remove(vacancy);
557  }
558 
559 #ifdef _WIN32
560  VirtualFree(allocation->base, 0, MEM_RELEASE);
561 #else
562  munmap(allocation->base, allocation->stride * allocation->count);
563 #endif
564 
565  if (allocation->previous) {
566  allocation->previous->next = allocation->next;
567  }
568  else {
569  // We are the head of the list, so update the pool:
570  allocation->pool->allocations = allocation->next;
571  }
572 
573  if (allocation->next) {
574  allocation->next->previous = allocation->previous;
575  }
576 
577  allocation->pool->count -= allocation->count;
578 
579  ruby_xfree(allocation);
580 }
581 #endif
582 
583 // Acquire a stack from the given fiber pool. If none are available, allocate more.
584 static struct fiber_pool_stack
585 fiber_pool_stack_acquire(struct fiber_pool * fiber_pool) {
586  struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pop(fiber_pool);
587 
588  if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);
589 
590  if (!vacancy) {
591  const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
592  const size_t minimum = fiber_pool->initial_count;
593 
594  size_t count = fiber_pool->count;
595  if (count > maximum) count = maximum;
596  if (count < minimum) count = minimum;
597 
598  fiber_pool_expand(fiber_pool, count);
599 
600  // The free list should now contain some stacks:
601     VM_ASSERT(fiber_pool->vacancies);
602 
603  vacancy = fiber_pool_vacancy_pop(fiber_pool);
604  }
605 
606  VM_ASSERT(vacancy);
607  VM_ASSERT(vacancy->stack.base);
608 
609  // Take the top item from the free list:
610  fiber_pool->used += 1;
611 
612 #ifdef FIBER_POOL_ALLOCATION_FREE
613  vacancy->stack.allocation->used += 1;
614 #endif
615 
616  fiber_pool_stack_reset(&vacancy->stack);
617 
618  return vacancy->stack;
619 }
620 
621 // We advise the operating system that the stack memory pages are no longer being used.
622 // This introduces some performance overhead but allows the system to reclaim memory when there is pressure.
623 static inline void
624 fiber_pool_stack_free(struct fiber_pool_stack * stack)
625 {
626  void * base = fiber_pool_stack_base(stack);
627  size_t size = stack->available;
628 
629  // If this is not true, the vacancy information will almost certainly be destroyed:
630     VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));
631 
632  if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"]\n", base, size, stack->base, stack->size);
633 
634 #if VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
635  // This immediately discards the pages and the memory is reset to zero.
636  madvise(base, size, MADV_DONTNEED);
637 #elif defined(MADV_FREE_REUSABLE)
638  madvise(base, size, MADV_FREE_REUSABLE);
639 #elif defined(MADV_FREE)
640  madvise(base, size, MADV_FREE);
641 #elif defined(MADV_DONTNEED)
642  madvise(base, size, MADV_DONTNEED);
643 #elif defined(_WIN32)
644  VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
645  // Not available in all versions of Windows.
646  //DiscardVirtualMemory(base, size);
647 #endif
648 }
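// Background on the madvise() ladder above (general platform facts, not
// Ruby-specific): MADV_DONTNEED discards the pages immediately, so the next
// touch returns zero-filled memory, while MADV_FREE/MADV_FREE_REUSABLE only
// mark the pages reclaimable, letting the kernel take them lazily under
// memory pressure. MADV_FREE is therefore preferred when available; the
// MADV_DONTNEED variant is forced under VM_CHECK_MODE to make any bug that
// depends on stale stack contents deterministic.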
649 
650 // Release and return a stack to the vacancy list.
651 static void
652 fiber_pool_stack_release(struct fiber_pool_stack * stack)
653 {
654  struct fiber_pool * pool = stack->pool;
655  struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
656 
657  if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);
658 
659  // Copy the stack details into the vacancy area:
660  vacancy->stack = *stack;
661  // After this point, be careful about updating/using state in stack, since it's copied to the vacancy area.
662 
663  // Reset the stack pointers and reserve space for the vacancy data:
664  fiber_pool_vacancy_reset(vacancy);
665 
666     // Push the vacancy into the vacancies list:
667  pool->vacancies = fiber_pool_vacancy_push(vacancy, stack->pool->vacancies);
668  pool->used -= 1;
669 
670 #ifdef FIBER_POOL_ALLOCATION_FREE
671  struct fiber_pool_allocation * allocation = stack->allocation;
672 
673  allocation->used -= 1;
674 
675  // Release address space and/or dirty memory:
676  if (allocation->used == 0) {
677  fiber_pool_allocation_free(allocation);
678  }
679  else if (stack->pool->free_stacks) {
680  fiber_pool_stack_free(&vacancy->stack);
681  }
682 #else
683  // This is entirely optional, but clears the dirty flag from the stack memory, so it won't get swapped to disk when there is memory pressure:
684  if (stack->pool->free_stacks) {
685  fiber_pool_stack_free(&vacancy->stack);
686  }
687 #endif
688 }
689 
690 static COROUTINE
691 fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
692 {
693  rb_fiber_start();
694 }
695 
696 // Initialize a fiber's coroutine's machine stack and vm stack.
697 static VALUE *
698 fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
699 {
700  struct fiber_pool * fiber_pool = fiber->stack.pool;
701  rb_execution_context_t *sec = &fiber->cont.saved_ec;
702  void * vm_stack = NULL;
703 
704     VM_ASSERT(fiber_pool != NULL);
705 
706  fiber->stack = fiber_pool_stack_acquire(fiber_pool);
707  vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
708     *vm_stack_size = fiber_pool->vm_stack_size;
709 
710 #ifdef COROUTINE_PRIVATE_STACK
711  coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, sec->machine.stack_start);
712  // The stack for this execution context is still the main machine stack, so don't adjust it.
713  // If this is not managed correctly, you will fail in `rb_ec_stack_check`.
714 
715  // We limit the machine stack usage to the fiber stack size.
716  if (sec->machine.stack_maxsize > fiber->stack.available) {
717  sec->machine.stack_maxsize = fiber->stack.available;
718  }
719 #else
720  coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);
721 
722  // The stack for this execution context is the one we allocated:
723  sec->machine.stack_start = fiber->stack.current;
724  sec->machine.stack_maxsize = fiber->stack.available;
725 #endif
726 
727  return vm_stack;
728 }
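// Resulting layout of one fiber's stack slot after the calls above, for a
// downward-growing machine stack (illustrative, not to scale):
//
//     base      +-----------------+ <- guard page sits just below `base`
//               |  machine stack  |    grows downward from stack.current
//               +-----------------+ <- stack.current == sec->machine.stack_start
//               |     vm_stack    |    fiber_pool->vm_stack_size bytes
//     base+size +-----------------+ <- top of the slot (was the vacancy page)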
729 
730 // Release the stack from the fiber, its execution context, and return it to the fiber pool.
731 static void
732 fiber_stack_release(rb_fiber_t * fiber)
733 {
734  rb_execution_context_t *ec = &fiber->cont.saved_ec;
735 
736  if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);
737 
738  // Return the stack back to the fiber pool if it wasn't already:
739  if (fiber->stack.base) {
740  fiber_pool_stack_release(&fiber->stack);
741  fiber->stack.base = NULL;
742  }
743 
744  // The stack is no longer associated with this execution context:
745     rb_ec_clear_vm_stack(ec);
746 }
747 
748 static const char *
749 fiber_status_name(enum fiber_status s)
750 {
751  switch (s) {
752  case FIBER_CREATED: return "created";
753  case FIBER_RESUMED: return "resumed";
754  case FIBER_SUSPENDED: return "suspended";
755  case FIBER_TERMINATED: return "terminated";
756  }
757  VM_UNREACHABLE(fiber_status_name);
758  return NULL;
759 }
760 
761 static void
762 fiber_verify(const rb_fiber_t *fiber)
763 {
764 #if VM_CHECK_MODE > 0
765  VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);
766 
767  switch (fiber->status) {
768  case FIBER_RESUMED:
769  VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
770  break;
771  case FIBER_SUSPENDED:
772  VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
773  break;
774  case FIBER_CREATED:
775  case FIBER_TERMINATED:
776  /* TODO */
777  break;
778  default:
779  VM_UNREACHABLE(fiber_verify);
780  }
781 #endif
782 }
783 
784 inline static void
785 fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)
786 {
787  // if (DEBUG) fprintf(stderr, "fiber: %p, status: %s -> %s\n", (void *)fiber, fiber_status_name(fiber->status), fiber_status_name(s));
788  VM_ASSERT(!FIBER_TERMINATED_P(fiber));
789  VM_ASSERT(fiber->status != s);
790  fiber_verify(fiber);
791  fiber->status = s;
792 }
793 
794 static inline void
795 ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
796 {
797  rb_execution_context_t *ec = &fiber->cont.saved_ec;
798 
799     ruby_current_execution_context_ptr = th->ec = ec;
800 
801  /*
802  * timer-thread may set trap interrupt on previous th->ec at any time;
803  * ensure we do not delay (or lose) the trap interrupt handling.
804  */
805  if (th->vm->main_thread == th && rb_signal_buff_size() > 0) {
806         RUBY_VM_SET_TRAP_INTERRUPT(ec);
807     }
808 
809  VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
810 }
811 
812 static rb_context_t *
813 cont_ptr(VALUE obj)
814 {
815  rb_context_t *cont;
816 
817  TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, cont);
818 
819  return cont;
820 }
821 
822 static rb_fiber_t *
823 fiber_ptr(VALUE obj)
824 {
825  rb_fiber_t *fiber;
826 
827  TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, fiber);
828  if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");
829 
830  return fiber;
831 }
832 
833 NOINLINE(static VALUE cont_capture(volatile int *volatile stat));
834 
835 #define THREAD_MUST_BE_RUNNING(th) do { \
836  if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
837  } while (0)
838 
839 static VALUE
840 cont_thread_value(const rb_context_t *cont)
841 {
842  return cont->saved_ec.thread_ptr->self;
843 }
844 
845 static void
846 cont_compact(void *ptr)
847 {
848  rb_context_t *cont = ptr;
849 
850  if (cont->self) {
851  cont->self = rb_gc_location(cont->self);
852  }
853  cont->value = rb_gc_location(cont->value);
854     rb_execution_context_update(&cont->saved_ec);
855 }
856 
857 static void
858 cont_mark(void *ptr)
859 {
860  rb_context_t *cont = ptr;
861 
862  RUBY_MARK_ENTER("cont");
863  if (cont->self) {
864  rb_gc_mark_movable(cont->self);
865  }
866  rb_gc_mark_movable(cont->value);
867 
868     rb_execution_context_mark(&cont->saved_ec);
869  rb_gc_mark(cont_thread_value(cont));
870 
871  if (cont->saved_vm_stack.ptr) {
872 #ifdef CAPTURE_JUST_VALID_VM_STACK
873         rb_gc_mark_locations(cont->saved_vm_stack.ptr,
874                              cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
875 #else
876         rb_gc_mark_locations(cont->saved_vm_stack.ptr,
877                              cont->saved_vm_stack.ptr + cont->saved_ec.vm_stack_size);
878 #endif
879  }
880 
881  if (cont->machine.stack) {
882  if (cont->type == CONTINUATION_CONTEXT) {
883  /* cont */
884             rb_gc_mark_locations(cont->machine.stack,
885                                  cont->machine.stack + cont->machine.stack_size);
886  }
887  else {
888  /* fiber */
889  const rb_fiber_t *fiber = (rb_fiber_t*)cont;
890 
891  if (!FIBER_TERMINATED_P(fiber)) {
892                 rb_gc_mark_locations(cont->machine.stack,
893                                      cont->machine.stack + cont->machine.stack_size);
894  }
895  }
896  }
897 
898  RUBY_MARK_LEAVE("cont");
899 }
900 
901 static int
902 fiber_is_root_p(const rb_fiber_t *fiber)
903 {
904  return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;
905 }
906 
907 static void
908 cont_free(void *ptr)
909 {
910  rb_context_t *cont = ptr;
911 
912  RUBY_FREE_ENTER("cont");
913 
914  if (cont->type == CONTINUATION_CONTEXT) {
915         ruby_xfree(cont->saved_ec.vm_stack);
916         ruby_xfree(cont->ensure_array);
917         RUBY_FREE_UNLESS_NULL(cont->machine.stack);
918     }
919  else {
920  rb_fiber_t *fiber = (rb_fiber_t*)cont;
921  coroutine_destroy(&fiber->context);
922  if (!fiber_is_root_p(fiber)) {
923  fiber_stack_release(fiber);
924  }
925  }
926 
927     RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
928 
929  if (mjit_enabled && cont->mjit_cont != NULL) {
930  mjit_cont_free(cont->mjit_cont);
931  }
932  /* free rb_cont_t or rb_fiber_t */
933  ruby_xfree(ptr);
934  RUBY_FREE_LEAVE("cont");
935 }
936 
937 static size_t
938 cont_memsize(const void *ptr)
939 {
940  const rb_context_t *cont = ptr;
941  size_t size = 0;
942 
943  size = sizeof(*cont);
944  if (cont->saved_vm_stack.ptr) {
945 #ifdef CAPTURE_JUST_VALID_VM_STACK
946  size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
947 #else
948  size_t n = cont->saved_ec.vm_stack_size;
949 #endif
950  size += n * sizeof(*cont->saved_vm_stack.ptr);
951  }
952 
953  if (cont->machine.stack) {
954  size += cont->machine.stack_size * sizeof(*cont->machine.stack);
955  }
956 
957  return size;
958 }
959 
960 void
961 rb_fiber_update_self(rb_fiber_t *fiber)
962 {
963  if (fiber->cont.self) {
964  fiber->cont.self = rb_gc_location(fiber->cont.self);
965  }
966  else {
967         rb_execution_context_update(&fiber->cont.saved_ec);
968     }
969 }
970 
971 void
972 rb_fiber_mark_self(const rb_fiber_t *fiber)
973 {
974  if (fiber->cont.self) {
975  rb_gc_mark_movable(fiber->cont.self);
976  }
977  else {
978         rb_execution_context_mark(&fiber->cont.saved_ec);
979     }
980 }
981 
982 static void
983 fiber_compact(void *ptr)
984 {
985  rb_fiber_t *fiber = ptr;
986  fiber->first_proc = rb_gc_location(fiber->first_proc);
987 
988  if (fiber->prev) rb_fiber_update_self(fiber->prev);
989 
990  cont_compact(&fiber->cont);
991  fiber_verify(fiber);
992 }
993 
994 static void
995 fiber_mark(void *ptr)
996 {
997  rb_fiber_t *fiber = ptr;
998  RUBY_MARK_ENTER("cont");
999  fiber_verify(fiber);
1000     rb_gc_mark_movable(fiber->first_proc);
1001     if (fiber->prev) rb_fiber_mark_self(fiber->prev);
1002  cont_mark(&fiber->cont);
1003  RUBY_MARK_LEAVE("cont");
1004 }
1005 
1006 static void
1007 fiber_free(void *ptr)
1008 {
1009  rb_fiber_t *fiber = ptr;
1010  RUBY_FREE_ENTER("fiber");
1011 
1012  //if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", fiber, fiber->stack.base);
1013 
1014  if (fiber->cont.saved_ec.local_storage) {
1015         st_free_table(fiber->cont.saved_ec.local_storage);
1016     }
1017 
1018  cont_free(&fiber->cont);
1019  RUBY_FREE_LEAVE("fiber");
1020 }
1021 
1022 static size_t
1023 fiber_memsize(const void *ptr)
1024 {
1025  const rb_fiber_t *fiber = ptr;
1026  size_t size = sizeof(*fiber);
1027  const rb_execution_context_t *saved_ec = &fiber->cont.saved_ec;
1028  const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);
1029 
1030  /*
1031  * vm.c::thread_memsize already counts th->ec->local_storage
1032  */
1033  if (saved_ec->local_storage && fiber != th->root_fiber) {
1034  size += st_memsize(saved_ec->local_storage);
1035  }
1036  size += cont_memsize(&fiber->cont);
1037  return size;
1038 }
1039 
1040 VALUE
1041 rb_obj_is_fiber(VALUE obj)
1042 {
1043  if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
1044  return Qtrue;
1045  }
1046  else {
1047  return Qfalse;
1048  }
1049 }
1050 
1051 static void
1052 cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
1053 {
1054  size_t size;
1055 
1056     SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1057 
1058  if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
1059         size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1060         cont->machine.stack_src = th->ec->machine.stack_end;
1061  }
1062  else {
1063         size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1064         cont->machine.stack_src = th->ec->machine.stack_start;
1065  }
1066 
1067  if (cont->machine.stack) {
1068  REALLOC_N(cont->machine.stack, VALUE, size);
1069  }
1070  else {
1071  cont->machine.stack = ALLOC_N(VALUE, size);
1072  }
1073 
1074     FLUSH_REGISTER_WINDOWS;
1075     MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
1076 }
1077 
1078 static const rb_data_type_t cont_data_type = {
1079  "continuation",
1080  {cont_mark, cont_free, cont_memsize, cont_compact},
1081     0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1082 };
1083 
1084 static inline void
1085 cont_save_thread(rb_context_t *cont, rb_thread_t *th)
1086 {
1087  rb_execution_context_t *sec = &cont->saved_ec;
1088 
1089     VM_ASSERT(th->status == THREAD_RUNNABLE);
1090 
1091  /* save thread context */
1092  *sec = *th->ec;
1093 
1094  /* saved_ec->machine.stack_end should be NULL */
1095  /* because it may happen GC afterward */
1096  sec->machine.stack_end = NULL;
1097 }
1098 
1099 static void
1100 cont_init_mjit_cont(rb_context_t *cont)
1101 {
1102  VM_ASSERT(cont->mjit_cont == NULL);
1103  if (mjit_enabled) {
1104  cont->mjit_cont = mjit_cont_new(&(cont->saved_ec));
1105  }
1106 }
1107 
1108 static void
1109 cont_init(rb_context_t *cont, rb_thread_t *th)
1110 {
1111  /* save thread context */
1112  cont_save_thread(cont, th);
1113  cont->saved_ec.thread_ptr = th;
1114  cont->saved_ec.local_storage = NULL;
1115     cont->saved_ec.local_storage_recursive_hash = Qnil;
1116     cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
1117     cont_init_mjit_cont(cont);
1118 }
1119 
1120 static rb_context_t *
1121 cont_new(VALUE klass)
1122 {
1123  rb_context_t *cont;
1124  volatile VALUE contval;
1125  rb_thread_t *th = GET_THREAD();
1126 
1127     THREAD_MUST_BE_RUNNING(th);
1128     contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
1129  cont->self = contval;
1130  cont_init(cont, th);
1131  return cont;
1132 }
1133 
1134 void
1135 rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
1136 {
1137  // Currently this function is meant for root_fiber. Others go through cont_new.
1138  // XXX: Is this mjit_cont `mjit_cont_free`d?
1139  cont_init_mjit_cont(&fiber->cont);
1140 }
1141 
1142 #if 0
1143 void
1144 show_vm_stack(const rb_execution_context_t *ec)
1145 {
1146  VALUE *p = ec->vm_stack;
1147  while (p < ec->cfp->sp) {
1148  fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
1149  rb_obj_info_dump(*p);
1150  p++;
1151  }
1152 }
1153 
1154 void
1155 show_vm_pcs(const rb_control_frame_t *cfp,
1156  const rb_control_frame_t *end_of_cfp)
1157 {
1158  int i=0;
1159  while (cfp != end_of_cfp) {
1160  int pc = 0;
1161  if (cfp->iseq) {
1162  pc = cfp->pc - cfp->iseq->body->iseq_encoded;
1163  }
1164  fprintf(stderr, "%2d pc: %d\n", i++, pc);
1165         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1166     }
1167 }
1168 #endif
1169 COMPILER_WARNING_PUSH
1170 #ifdef __clang__
1171 COMPILER_WARNING_IGNORED(-Wduplicate-decl-specifier)
1172 #endif
1173 static VALUE
1174 cont_capture(volatile int *volatile stat)
1175 {
1176  rb_context_t *volatile cont;
1177  rb_thread_t *th = GET_THREAD();
1178  volatile VALUE contval;
1179  const rb_execution_context_t *ec = th->ec;
1180 
1181     THREAD_MUST_BE_RUNNING(th);
1182     rb_vm_stack_to_heap(th->ec);
1183  cont = cont_new(rb_cContinuation);
1184  contval = cont->self;
1185 
1186 #ifdef CAPTURE_JUST_VALID_VM_STACK
1187  cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
1188  cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
1189     cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1190     MEMCPY(cont->saved_vm_stack.ptr,
1191  ec->vm_stack,
1192  VALUE, cont->saved_vm_stack.slen);
1193     MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1194            (VALUE*)ec->cfp,
1195  VALUE,
1196  cont->saved_vm_stack.clen);
1197 #else
1198     cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
1199     MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
1200 #endif
1201  // At this point, `cfp` is valid but `vm_stack` should be cleared:
1202  rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
1203  VM_ASSERT(cont->saved_ec.cfp != NULL);
1204  cont_save_machine_stack(th, cont);
1205 
1206     /* back up the ensure_list to an array for lookup from another context */
1207  {
1208  rb_ensure_list_t *p;
1209  int size = 0;
1210  rb_ensure_entry_t *entry;
1211  for (p=th->ec->ensure_list; p; p=p->next)
1212  size++;
1213  entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
1214  for (p=th->ec->ensure_list; p; p=p->next) {
1215  if (!p->entry.marker)
1216  p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
1217  *entry++ = p->entry;
1218  }
1219  entry->marker = 0;
1220  }
1221 
1222  if (ruby_setjmp(cont->jmpbuf)) {
1223  VALUE value;
1224 
1225  VAR_INITIALIZED(cont);
1226  value = cont->value;
1227  if (cont->argc == -1) rb_exc_raise(value);
1228  cont->value = Qnil;
1229  *stat = 1;
1230  return value;
1231  }
1232  else {
1233  *stat = 0;
1234  return contval;
1235  }
1236 }
1237 COMPILER_WARNING_POP
1238 
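// The control transfer underneath cont_capture/cont_restore_1 is plain ISO C
// setjmp/longjmp, plus the machine-stack copying done elsewhere in this file.
// A minimal sketch of just the jump part (standard C, independent of Ruby;
// valid only while the capturing frame is still live):
#if 0
#include <setjmp.h>

static jmp_buf env;

static void
capture_sketch(void)
{
    if (setjmp(env) == 0) {
        // First return: the context was captured; execution continues.
    }
    else {
        // Second return: somebody called longjmp(env, 1), i.e. cont.call.
    }
}
#endif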
1239 static inline void
1240 fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
1241 {
1242  ec_switch(th, fiber);
1243  VM_ASSERT(th->ec->fiber_ptr == fiber);
1244 }
1245 
1246 static inline void
1247 cont_restore_thread(rb_context_t *cont)
1248 {
1249  rb_thread_t *th = GET_THREAD();
1250 
1251  /* restore thread context */
1252  if (cont->type == CONTINUATION_CONTEXT) {
1253  /* continuation */
1254  rb_execution_context_t *sec = &cont->saved_ec;
1255  rb_fiber_t *fiber = NULL;
1256 
1257  if (sec->fiber_ptr != NULL) {
1258  fiber = sec->fiber_ptr;
1259  }
1260  else if (th->root_fiber) {
1261  fiber = th->root_fiber;
1262  }
1263 
1264  if (fiber && th->ec != &fiber->cont.saved_ec) {
1265  ec_switch(th, fiber);
1266  }
1267 
1268  if (th->ec->trace_arg != sec->trace_arg) {
1269  rb_raise(rb_eRuntimeError, "can't call across trace_func");
1270  }
1271 
1272  /* copy vm stack */
1273 #ifdef CAPTURE_JUST_VALID_VM_STACK
1274  MEMCPY(th->ec->vm_stack,
1275  cont->saved_vm_stack.ptr,
1276  VALUE, cont->saved_vm_stack.slen);
1277  MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
1278  cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1279  VALUE, cont->saved_vm_stack.clen);
1280 #else
1281  MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
1282 #endif
1283  /* other members of ec */
1284 
1285  th->ec->cfp = sec->cfp;
1286  th->ec->raised_flag = sec->raised_flag;
1287  th->ec->tag = sec->tag;
1288  th->ec->protect_tag = sec->protect_tag;
1289  th->ec->root_lep = sec->root_lep;
1290  th->ec->root_svar = sec->root_svar;
1291  th->ec->ensure_list = sec->ensure_list;
1292  th->ec->errinfo = sec->errinfo;
1293 
1294  VM_ASSERT(th->ec->vm_stack != NULL);
1295  }
1296  else {
1297  /* fiber */
1298  fiber_restore_thread(th, (rb_fiber_t*)cont);
1299  }
1300 }
1301 
1302 NOINLINE(static void fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber));
1303 
1304 static void
1305 fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
1306 {
1307  rb_thread_t *th = GET_THREAD();
1308 
1309  /* save old_fiber's machine stack - to ensure efficient garbage collection */
1310  if (!FIBER_TERMINATED_P(old_fiber)) {
1311         STACK_GROW_DIR_DETECTION;
1312         SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1313         if (STACK_DIR_UPPER(0, 1)) {
1314  old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1315  old_fiber->cont.machine.stack = th->ec->machine.stack_end;
1316  }
1317  else {
1318  old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1319  old_fiber->cont.machine.stack = th->ec->machine.stack_start;
1320  }
1321  }
1322 
1323  /* exchange machine_stack_start between old_fiber and new_fiber */
1324     old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
1325 
1326  /* old_fiber->machine.stack_end should be NULL */
1327  old_fiber->cont.saved_ec.machine.stack_end = NULL;
1328 
1329  /* restore thread context */
1330  fiber_restore_thread(th, new_fiber);
1331 
1332  // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] -> %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
1333 
1334  /* swap machine context */
1335  coroutine_transfer(&old_fiber->context, &new_fiber->context);
1336 
1337  // It's possible to get here, and new_fiber is already freed.
1338  // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] <- %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
1339 }
1340 
1341 NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
1342 
1343 static void
1344 cont_restore_1(rb_context_t *cont)
1345 {
1346  cont_restore_thread(cont);
1347 
1348  /* restore machine stack */
1349 #ifdef _M_AMD64
1350  {
1351  /* workaround for x64 SEH */
1352  jmp_buf buf;
1353  setjmp(buf);
1354  _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
1355  bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;
1356  }
1357 #endif
1358  if (cont->machine.stack_src) {
1359         FLUSH_REGISTER_WINDOWS;
1360         MEMCPY(cont->machine.stack_src, cont->machine.stack,
1361  VALUE, cont->machine.stack_size);
1362  }
1363 
1364  ruby_longjmp(cont->jmpbuf, 1);
1365 }
1366 
1367 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
1368 
1369 static void
1370 cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
1371 {
1372  if (cont->machine.stack_src) {
1373 #ifdef HAVE_ALLOCA
1374 #define STACK_PAD_SIZE 1
1375 #else
1376 #define STACK_PAD_SIZE 1024
1377 #endif
1378  VALUE space[STACK_PAD_SIZE];
1379 
1380 #if !STACK_GROW_DIRECTION
1381  if (addr_in_prev_frame > &space[0]) {
1382  /* Stack grows downward */
1383 #endif
1384 #if STACK_GROW_DIRECTION <= 0
1385  volatile VALUE *const end = cont->machine.stack_src;
1386  if (&space[0] > end) {
1387 # ifdef HAVE_ALLOCA
1388  volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
1389  space[0] = *sp;
1390 # else
1391  cont_restore_0(cont, &space[0]);
1392 # endif
1393  }
1394 #endif
1395 #if !STACK_GROW_DIRECTION
1396  }
1397  else {
1398  /* Stack grows upward */
1399 #endif
1400 #if STACK_GROW_DIRECTION >= 0
1401  volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
1402  if (&space[STACK_PAD_SIZE] < end) {
1403 # ifdef HAVE_ALLOCA
1404  volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
1405  space[0] = *sp;
1406 # else
1407  cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
1408 # endif
1409  }
1410 #endif
1411 #if !STACK_GROW_DIRECTION
1412  }
1413 #endif
1414  }
1415  cont_restore_1(cont);
1416 }
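// Why cont_restore_0 recurses with the `space[]` pad: cont_restore_1 MEMCPYs
// the saved machine stack back over the live one, so the frame that performs
// the copy must sit outside the region being overwritten. Each recursion (or
// the single alloca when HAVE_ALLOCA) moves the stack pointer past the saved
// region's boundary before cont_restore_1 finally longjmps.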
1417 
1418 /*
1419  * Document-class: Continuation
1420  *
1421  * Continuation objects are generated by Kernel#callcc,
1422  * after having +require+d <i>continuation</i>. They hold
1423  * a return address and execution context, allowing a nonlocal return
1424  * to the end of the #callcc block from anywhere within a
1425  * program. Continuations are somewhat analogous to a structured
1426  * version of C's <code>setjmp/longjmp</code> (although they contain
1427  * more state, so you might consider them closer to threads).
1428  *
1429  * For instance:
1430  *
1431  * require "continuation"
1432  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1433  * callcc{|cc| $cc = cc}
1434  * puts(message = arr.shift)
1435  * $cc.call unless message =~ /Max/
1436  *
1437  * <em>produces:</em>
1438  *
1439  * Freddie
1440  * Herbie
1441  * Ron
1442  * Max
1443  *
1444  * You can also call callcc from other methods:
1445  *
1446  * require "continuation"
1447  *
1448  * def g
1449  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1450  * cc = callcc { |cc| cc }
1451  * puts arr.shift
1452  * return cc, arr.size
1453  * end
1454  *
1455  * def f
1456  * c, size = g
1457  * c.call(c) if size > 1
1458  * end
1459  *
1460  * f
1461  *
1462  * This (somewhat contrived) example allows the inner loop to abandon
1463  * processing early:
1464  *
1465  * require "continuation"
1466  * callcc {|cont|
1467  * for i in 0..4
1468  * print "#{i}: "
1469  * for j in i*5...(i+1)*5
1470  * cont.call() if j == 17
1471  * printf "%3d", j
1472  * end
1473  * end
1474  * }
1475  * puts
1476  *
1477  * <em>produces:</em>
1478  *
1479  * 0: 0 1 2 3 4
1480  * 1: 5 6 7 8 9
1481  * 2: 10 11 12 13 14
1482  * 3: 15 16
1483  */
1484 
1485 /*
1486  * call-seq:
1487  * callcc {|cont| block } -> obj
1488  *
1489  * Generates a Continuation object, which it passes to
1490  * the associated block. You need to <code>require
1491  * 'continuation'</code> before using this method. Performing a
1492  * <em>cont</em><code>.call</code> will cause the #callcc
1493  * to return (as will falling through the end of the block). The
1494  * value returned by the #callcc is the value of the
1495  * block, or the value passed to <em>cont</em><code>.call</code>. See
1496  * class Continuation for more details. Also see
1497  * Kernel#throw for an alternative mechanism for
1498  * unwinding a call stack.
1499  */
1500 
1501 static VALUE
1502 rb_callcc(VALUE self)
1503 {
1504  volatile int called;
1505  volatile VALUE val = cont_capture(&called);
1506 
1507  if (called) {
1508  return val;
1509  }
1510  else {
1511  return rb_yield(val);
1512  }
1513 }
1514 
1515 static VALUE
1516 make_passing_arg(int argc, const VALUE *argv)
1517 {
1518  switch (argc) {
1519  case -1:
1520  return argv[0];
1521  case 0:
1522  return Qnil;
1523  case 1:
1524  return argv[0];
1525  default:
1526  return rb_ary_new4(argc, argv);
1527  }
1528 }
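// Behaviour summary (matches the switch above):
//
//     make_passing_arg(-1, {exc})  -> exc        (exception to re-raise)
//     make_passing_arg( 0, NULL)   -> Qnil       (cont.call)
//     make_passing_arg( 1, {v})    -> v          (cont.call 1)
//     make_passing_arg( n, argv)   -> [a1...an]  (cont.call 1, 2, 3)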
1529 
1530 typedef VALUE e_proc(VALUE);
1531 
1532 /* CAUTION!! : Currently, error in rollback_func is not supported */
1533 /* same as rb_protect if set rollback_func to NULL */
1534 void
1535 ruby_register_rollback_func_for_ensure(e_proc *ensure_func, e_proc *rollback_func)
1536 {
1537  st_table **table_p = &GET_VM()->ensure_rollback_table;
1538  if (UNLIKELY(*table_p == NULL)) {
1539  *table_p = st_init_numtable();
1540  }
1541  st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
1542 }
1543 
1544 static inline e_proc *
1545 lookup_rollback_func(e_proc *ensure_func)
1546 {
1547  st_table *table = GET_VM()->ensure_rollback_table;
1548  st_data_t val;
1549  if (table && st_lookup(table, (st_data_t)ensure_func, &val))
1550  return (e_proc *) val;
1551  return (e_proc *) Qundef;
1552 }
1553 
1554 
1555 static inline void
1556 rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
1557 {
1558  rb_ensure_list_t *p;
1559  rb_ensure_entry_t *entry;
1560  size_t i, j;
1561  size_t cur_size;
1562  size_t target_size;
1563  size_t base_point;
1564  e_proc *func;
1565 
1566  cur_size = 0;
1567  for (p=current; p; p=p->next)
1568  cur_size++;
1569  target_size = 0;
1570  for (entry=target; entry->marker; entry++)
1571  target_size++;
1572 
1573  /* search common stack point */
1574  p = current;
1575  base_point = cur_size;
1576  while (base_point) {
1577  if (target_size >= base_point &&
1578  p->entry.marker == target[target_size - base_point].marker)
1579  break;
1580  base_point --;
1581  p = p->next;
1582  }
1583 
1584  /* rollback function check */
1585  for (i=0; i < target_size - base_point; i++) {
1586  if (!lookup_rollback_func(target[i].e_proc)) {
1587  rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
1588  }
1589  }
1590  /* pop ensure stack */
1591  while (cur_size > base_point) {
1592  /* escape from ensure block */
1593  (*current->entry.e_proc)(current->entry.data2);
1594  current = current->next;
1595  cur_size--;
1596  }
1597  /* push ensure stack */
1598  for (j = 0; j < i; j++) {
1599  func = lookup_rollback_func(target[i - j - 1].e_proc);
1600  if ((VALUE)func != Qundef) {
1601  (*func)(target[i - j - 1].data2);
1602  }
1603  }
1604 }
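// In outline, rollback_ensure_stack above:
//   1. measures the live ensure stack (`current`) and the saved one (`target`),
//   2. finds the deepest common entry by comparing markers,
//   3. runs the e_proc of every live entry above the common point (leaving
//      those ensure blocks), then
//   4. replays the registered rollback funcs, in reverse order, for the saved
//      entries above the common point (re-entering the captured ensure blocks).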
1605 
1606 /*
1607  * call-seq:
1608  * cont.call(args, ...)
1609  * cont[args, ...]
1610  *
1611  * Invokes the continuation. The program continues from the end of
1612  * the #callcc block. If no arguments are given, the original #callcc
1613  * returns +nil+. If one argument is given, #callcc returns
1614  * it. Otherwise, an array containing <i>args</i> is returned.
1615  *
1616  * callcc {|cont| cont.call } #=> nil
1617  * callcc {|cont| cont.call 1 } #=> 1
1618  * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
1619  */
1620 
1621 static VALUE
1622 rb_cont_call(int argc, VALUE *argv, VALUE contval)
1623 {
1624  rb_context_t *cont = cont_ptr(contval);
1625  rb_thread_t *th = GET_THREAD();
1626 
1627  if (cont_thread_value(cont) != th->self) {
1628  rb_raise(rb_eRuntimeError, "continuation called across threads");
1629  }
1630  if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
1631  rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
1632  }
1633  if (cont->saved_ec.fiber_ptr) {
1634  if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
1635  rb_raise(rb_eRuntimeError, "continuation called across fiber");
1636  }
1637  }
1638  rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);
1639 
1640  cont->argc = argc;
1641  cont->value = make_passing_arg(argc, argv);
1642 
1643  cont_restore_0(cont, &contval);
1644  return Qnil; /* unreachable */
1645 }
1646 
1647 /*********/
1648 /* fiber */
1649 /*********/
1650 
1651 /*
1652  * Document-class: Fiber
1653  *
1654  * Fibers are primitives for implementing lightweight cooperative
1655  * concurrency in Ruby. Basically they are a means of creating code blocks
1656  * that can be paused and resumed, much like threads. The main difference
1657  * is that they are never preempted and that the scheduling must be done by
1658  * the programmer and not the VM.
1659  *
1660  * As opposed to other stackless lightweight concurrency models, each fiber
1661  * comes with a stack. This enables the fiber to be paused from deeply
1662  * nested function calls within the fiber block. See the ruby(1)
1663  * manpage to configure the size of the fiber stack(s).
1664  *
1665  * When a fiber is created it will not run automatically. Rather it must
1666  * be explicitly asked to run using the Fiber#resume method.
1667  * The code running inside the fiber can give up control by calling
1668  * Fiber.yield, in which case it yields control back to the caller (the
1669  * caller of Fiber#resume).
1670  *
1671  * Upon yielding or termination the Fiber returns the value of the last
1672  * executed expression.
1673  *
1674  * For instance:
1675  *
1676  * fiber = Fiber.new do
1677  * Fiber.yield 1
1678  * 2
1679  * end
1680  *
1681  * puts fiber.resume
1682  * puts fiber.resume
1683  * puts fiber.resume
1684  *
1685  * <em>produces</em>
1686  *
1687  * 1
1688  * 2
1689  * FiberError: dead fiber called
1690  *
1691  * The Fiber#resume method accepts an arbitrary number of parameters.
1692  * If it is the first call to #resume, they will be passed as
1693  * block arguments; otherwise they will be the return value of the
1694  * call to Fiber.yield.
1695  *
1696  * Example:
1697  *
1698  * fiber = Fiber.new do |first|
1699  * second = Fiber.yield first + 2
1700  * end
1701  *
1702  * puts fiber.resume 10
1703  * puts fiber.resume 1_000_000
1704  * puts fiber.resume "The fiber will be dead before I can cause trouble"
1705  *
1706  * <em>produces</em>
1707  *
1708  * 12
1709  * 1000000
1710  * FiberError: dead fiber called
1711  *
1712  */
1713 
1714 static const rb_data_type_t fiber_data_type = {
1715  "fiber",
1716  {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
1717     0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1718 };
1719 
1720 static VALUE
1721 fiber_alloc(VALUE klass)
1722 {
1723  return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
1724 }
1725 
1726 static rb_fiber_t*
1727 fiber_t_alloc(VALUE fiber_value)
1728 {
1729  rb_fiber_t *fiber;
1730  rb_thread_t *th = GET_THREAD();
1731 
1732  if (DATA_PTR(fiber_value) != 0) {
1733  rb_raise(rb_eRuntimeError, "cannot initialize twice");
1734  }
1735 
1736     THREAD_MUST_BE_RUNNING(th);
1737     fiber = ZALLOC(rb_fiber_t);
1738  fiber->cont.self = fiber_value;
1739  fiber->cont.type = FIBER_CONTEXT;
1740  cont_init(&fiber->cont, th);
1741 
1742  fiber->cont.saved_ec.fiber_ptr = fiber;
1743     rb_ec_clear_vm_stack(&fiber->cont.saved_ec);
1744 
1745  fiber->prev = NULL;
1746 
1747  /* fiber->status == 0 == CREATED
1748  * So that we don't need to set status: fiber_status_set(fiber, FIBER_CREATED); */
1749  VM_ASSERT(FIBER_CREATED_P(fiber));
1750 
1751  DATA_PTR(fiber_value) = fiber;
1752 
1753  return fiber;
1754 }
1755 
1756 static VALUE
1757 fiber_initialize(VALUE self, VALUE proc, struct fiber_pool * fiber_pool)
1758 {
1759  rb_fiber_t *fiber = fiber_t_alloc(self);
1760 
1761  fiber->first_proc = proc;
1762  fiber->stack.base = NULL;
1763  fiber->stack.pool = fiber_pool;
1764 
1765  return self;
1766 }
1767 
1768 static void
1769 fiber_prepare_stack(rb_fiber_t *fiber)
1770 {
1771  rb_context_t *cont = &fiber->cont;
1772  rb_execution_context_t *sec = &cont->saved_ec;
1773 
1774  size_t vm_stack_size = 0;
1775  VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);
1776 
1777  /* initialize cont */
1778  cont->saved_vm_stack.ptr = NULL;
1779  rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size / sizeof(VALUE));
1780 
1781  sec->tag = NULL;
1782  sec->local_storage = NULL;
1783     sec->local_storage_recursive_hash = Qnil;
1784     sec->local_storage_recursive_hash_for_trace = Qnil;
1785 }
1786 
1787 /* :nodoc: */
1788 static VALUE
1789 rb_fiber_initialize(int argc, VALUE* argv, VALUE self)
1790 {
1791  return fiber_initialize(self, rb_block_proc(), &shared_fiber_pool);
1792 }
1793 
1794 VALUE
1795 rb_fiber_new(rb_block_call_func_t func, VALUE obj)
1796 {
1797  return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), &shared_fiber_pool);
1798 }
1799 
1800 static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt);
1801 
1802 #define PASS_KW_SPLAT (rb_empty_keyword_given_p() ? RB_PASS_EMPTY_KEYWORDS : rb_keyword_given_p())
1803 
1804 void
1805 rb_fiber_start(void)
1806 {
1807  rb_thread_t * volatile th = GET_THREAD();
1808  rb_fiber_t *fiber = th->ec->fiber_ptr;
1809  rb_proc_t *proc;
1810  enum ruby_tag_type state;
1811  int need_interrupt = TRUE;
1812 
1814  VM_ASSERT(FIBER_RESUMED_P(fiber));
1815 
1816  EC_PUSH_TAG(th->ec);
1817  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1818  rb_context_t *cont = &VAR_FROM_MEMORY(fiber)->cont;
1819  int argc;
1820  const VALUE *argv, args = cont->value;
1821  int kw_splat = cont->kw_splat;
1822  GetProcPtr(fiber->first_proc, proc);
1823  argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
1824  cont->value = Qnil;
1825  th->ec->errinfo = Qnil;
1826  th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
1827  th->ec->root_svar = Qfalse;
1828 
1829  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
1830  rb_adjust_argv_kw_splat(&argc, &argv, &kw_splat);
1831  cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE);
1832  }
1833  EC_POP_TAG();
1834 
1835  if (state) {
1836  VALUE err = th->ec->errinfo;
1837  VM_ASSERT(FIBER_RESUMED_P(fiber));
1838 
1839  if (state == TAG_RAISE || state == TAG_FATAL) {
1840             rb_threadptr_pending_interrupt_enque(th, err);
1841         }
1842  else {
1843             err = rb_vm_make_jump_tag_but_local_jump(state, err);
1844             if (!NIL_P(err)) {
1845                 rb_threadptr_pending_interrupt_enque(th, err);
1846             }
1847  }
1848  need_interrupt = TRUE;
1849  }
1850 
1851  rb_fiber_terminate(fiber, need_interrupt);
1852     VM_UNREACHABLE(rb_fiber_start);
1853 }
1854 
1855 static rb_fiber_t *
1856 root_fiber_alloc(rb_thread_t *th)
1857 {
1858  VALUE fiber_value = fiber_alloc(rb_cFiber);
1859  rb_fiber_t *fiber = th->ec->fiber_ptr;
1860 
1861  VM_ASSERT(DATA_PTR(fiber_value) == NULL);
1862  VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
1863  VM_ASSERT(fiber->status == FIBER_RESUMED);
1864 
1865  th->root_fiber = fiber;
1866  DATA_PTR(fiber_value) = fiber;
1867  fiber->cont.self = fiber_value;
1868 
1869 #ifdef COROUTINE_PRIVATE_STACK
1870  fiber->stack = fiber_pool_stack_acquire(&shared_fiber_pool);
1871  coroutine_initialize_main(&fiber->context, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, th->ec->machine.stack_start);
1872 #else
1873  coroutine_initialize_main(&fiber->context);
1874 #endif
1875 
1876  return fiber;
1877 }
1878 
1879 void
1880 rb_threadptr_root_fiber_setup(rb_thread_t *th)
1881 {
1882  rb_fiber_t *fiber = ruby_mimmalloc(sizeof(rb_fiber_t));
1883  if (!fiber) {
1884  rb_bug("%s", strerror(errno)); /* ... is it possible to call rb_bug here? */
1885  }
1886  MEMZERO(fiber, rb_fiber_t, 1);
1887  fiber->cont.type = FIBER_CONTEXT;
1888  fiber->cont.saved_ec.fiber_ptr = fiber;
1889  fiber->cont.saved_ec.thread_ptr = th;
1890  fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */
1891  th->ec = &fiber->cont.saved_ec;
1892 }
1893 
1894 void
1895 rb_threadptr_root_fiber_release(rb_thread_t *th)
1896 {
1897  if (th->root_fiber) {
1898  /* ignore. A root fiber object will free th->ec */
1899  }
1900  else {
1901         VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
1902         VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
1903  fiber_free(th->ec->fiber_ptr);
1904 
1905         if (th->ec == ruby_current_execution_context_ptr) {
1906             ruby_current_execution_context_ptr = NULL;
1907         }
1908  th->ec = NULL;
1909  }
1910 }
1911 
1912 void
1913 rb_threadptr_root_fiber_terminate(rb_thread_t *th)
1914 {
1915  rb_fiber_t *fiber = th->ec->fiber_ptr;
1916 
1917  fiber->status = FIBER_TERMINATED;
1918 
1919  // The vm_stack is `alloca`ed on the thread stack, so it's gone too:
1920  rb_ec_clear_vm_stack(th->ec);
1921 }
1922 
1923 static inline rb_fiber_t*
1924 fiber_current(void)
1925 {
1926     rb_execution_context_t *ec = GET_EC();
1927     if (ec->fiber_ptr->cont.self == 0) {
1928  root_fiber_alloc(rb_ec_thread_ptr(ec));
1929  }
1930  return ec->fiber_ptr;
1931 }
1932 
1933 static inline rb_fiber_t*
1934 return_fiber(void)
1935 {
1936  rb_fiber_t *fiber = fiber_current();
1937  rb_fiber_t *prev = fiber->prev;
1938 
1939  if (!prev) {
1940  rb_thread_t *th = GET_THREAD();
1941  rb_fiber_t *root_fiber = th->root_fiber;
1942 
1943  VM_ASSERT(root_fiber != NULL);
1944 
1945  if (root_fiber == fiber) {
1946  rb_raise(rb_eFiberError, "can't yield from root fiber");
1947  }
1948  return root_fiber;
1949  }
1950  else {
1951  fiber->prev = NULL;
1952  return prev;
1953  }
1954 }
1955 
1956 VALUE
1957 rb_fiber_current(void)
1958 {
1959  return fiber_current()->cont.self;
1960 }
1961 
1962 // Prepare to execute next_fiber on the given thread.
1963 static inline VALUE
1964 fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
1965 {
1966  rb_fiber_t *fiber;
1967 
1968  if (th->ec->fiber_ptr != NULL) {
1969  fiber = th->ec->fiber_ptr;
1970  }
1971  else {
1972  /* create root fiber */
1973  fiber = root_fiber_alloc(th);
1974  }
1975 
1976  if (FIBER_CREATED_P(next_fiber)) {
1977  fiber_prepare_stack(next_fiber);
1978  }
1979 
1980  VM_ASSERT(FIBER_RESUMED_P(fiber) || FIBER_TERMINATED_P(fiber));
1981  VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));
1982 
1983  if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);
1984 
1985  fiber_status_set(next_fiber, FIBER_RESUMED);
1986  fiber_setcontext(next_fiber, fiber);
1987 
1988  fiber = th->ec->fiber_ptr;
1989 
1990  /* Raise an exception if that was the result of executing the fiber */
1991  if (fiber->cont.argc == -1) rb_exc_raise(fiber->cont.value);
1992 
1993  return fiber->cont.value;
1994 }
1995 
1996 static inline VALUE
1997 fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int is_resume, int kw_splat)
1998 {
1999  VALUE value;
2000  rb_context_t *cont = &fiber->cont;
2001  rb_thread_t *th = GET_THREAD();
2002 
2003  /* make sure the root_fiber object is available */
2004  if (th->root_fiber == NULL) root_fiber_alloc(th);
2005 
2006  if (th->ec->fiber_ptr == fiber) {
2007  /* ignore fiber context switch
2008  * because destination fiber is same as current fiber
2009  */
2010  return make_passing_arg(argc, argv);
2011  }
2012 
2013  if (cont_thread_value(cont) != th->self) {
2014  rb_raise(rb_eFiberError, "fiber called across threads");
2015  }
2016  else if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
2017  rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
2018  }
2019  else if (FIBER_TERMINATED_P(fiber)) {
2020  value = rb_exc_new2(rb_eFiberError, "dead fiber called");
2021 
2022  if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
2023  rb_exc_raise(value);
2024  VM_UNREACHABLE(fiber_switch);
2025  }
2026  else {
2027  /* th->ec->fiber_ptr is also dead => switch to root fiber */
2028  /* (this means we're being called from rb_fiber_terminate, */
2029  /* and the terminated fiber's return_fiber() is already dead) */
2030             VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));
2031 
2032  cont = &th->root_fiber->cont;
2033  cont->argc = -1;
2034  cont->value = value;
2035 
2036  fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);
2037 
2038  VM_UNREACHABLE(fiber_switch);
2039  }
2040  }
2041 
2042  if (is_resume) {
2043  fiber->prev = fiber_current();
2044  }
2045 
2046  VM_ASSERT(FIBER_RUNNABLE_P(fiber));
2047 
2048  cont->argc = argc;
2049  cont->kw_splat = kw_splat;
2050  cont->value = make_passing_arg(argc, argv);
2051 
2052  value = fiber_store(fiber, th);
2053 
2054  if (is_resume && FIBER_TERMINATED_P(fiber)) {
2055  fiber_stack_release(fiber);
2056  }
2057 
2058  RUBY_VM_CHECK_INTS(th->ec);
2059 
2060  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
2061 
2062  return value;
2063 }
2064 
2065 VALUE
2066 rb_fiber_transfer(VALUE fiber_value, int argc, const VALUE *argv)
2067 {
2068  return fiber_switch(fiber_ptr(fiber_value), argc, argv, 0, RB_NO_KEYWORDS);
2069 }
2070 
2071 void
2072 rb_fiber_close(rb_fiber_t *fiber)
2073 {
2074  fiber_status_set(fiber, FIBER_TERMINATED);
2075 }
2076 
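// Tears down a finished fiber: marks it terminated, destroys its coroutine
// context, drops its machine-stack bookkeeping, and switches to the fiber
// that should run next, raising any pending interrupt there first.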
2077 static void
2078 rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt)
2079 {
2080  VALUE value = fiber->cont.value;
2081  rb_fiber_t *next_fiber;
2082 
2083  VM_ASSERT(FIBER_RESUMED_P(fiber));
2084  rb_fiber_close(fiber);
2085 
2086  coroutine_destroy(&fiber->context);
2087 
2088  fiber->cont.machine.stack = NULL;
2089  fiber->cont.machine.stack_size = 0;
2090 
2091  next_fiber = return_fiber();
2092  if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);
2093  fiber_switch(next_fiber, 1, &value, 0, RB_NO_KEYWORDS);
2094 }
2095 
2096 VALUE
2097 rb_fiber_resume_kw(VALUE fiber_value, int argc, const VALUE *argv, int kw_splat)
2098 {
2099  rb_fiber_t *fiber = fiber_ptr(fiber_value);
2100 
2101  if (argc == -1 && FIBER_CREATED_P(fiber)) {
2102  rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");
2103  }
2104 
2105  if (fiber->prev != 0 || fiber_is_root_p(fiber)) {
2106  rb_raise(rb_eFiberError, "double resume");
2107  }
2108 
2109  if (fiber->transferred != 0) {
2110  rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
2111  }
2112 
2113  return fiber_switch(fiber, argc, argv, 1, kw_splat);
2114 }
2115 
2116 VALUE
2117 rb_fiber_resume(VALUE fiber_value, int argc, const VALUE *argv)
2118 {
2119  return rb_fiber_resume_kw(fiber_value, argc, argv, RB_NO_KEYWORDS);
2120 }
2121 
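// A hedged sketch (not part of cont.c) of driving a fiber from C with the
// public API used above -- rb_fiber_new, rb_fiber_resume and rb_fiber_yield,
// all declared in ruby/intern.h in 2.7. The example_* names are hypothetical.
#include <ruby.h>

// Fiber body: yields 1 to the resumer, then terminates returning 2.
static VALUE
example_fiber_body(VALUE yielded_arg, VALUE callback_arg,
                   int argc, const VALUE *argv, VALUE blockarg)
{
    VALUE one = INT2FIX(1);
    rb_fiber_yield(1, &one);  // suspends; 1 becomes the first resume's value
    return INT2FIX(2);        // terminates; 2 becomes the second resume's value
}

static VALUE
example_drive_fiber(VALUE self)
{
    VALUE fiber = rb_fiber_new(example_fiber_body, Qnil);
    VALUE first = rb_fiber_resume(fiber, 0, NULL);  // => 1
    VALUE last  = rb_fiber_resume(fiber, 0, NULL);  // => 2
    return rb_ary_new_from_args(2, first, last);    // => [1, 2]
}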
2122 VALUE
2123 rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)
2124 {
2125  return fiber_switch(return_fiber(), argc, argv, 0, kw_splat);
2126 }
2127 
2128 VALUE
2129 rb_fiber_yield(int argc, const VALUE *argv)
2130 {
2131  return fiber_switch(return_fiber(), argc, argv, 0, RB_NO_KEYWORDS);
2132 }
2133 
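// Note on argument passing: the value seen on the other side of a switch is
// built by make_passing_arg (called from fiber_switch above): no arguments
// arrive as nil, a single argument passes through unchanged, and several
// arguments are wrapped in an Array. A hypothetical fragment, valid only
// inside a running non-root fiber body:
#include <ruby.h>

static VALUE
example_yield_pair(void)
{
    VALUE pair[2] = { INT2FIX(1), INT2FIX(2) };
    return rb_fiber_yield(2, pair);  // the resumer receives [1, 2]
}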
2134 void
2135 rb_fiber_reset_root_local_storage(rb_thread_t *th)
2136 {
2137  if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
2138  th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
2139  }
2140 }
2141 
2142 /*
2143  * call-seq:
2144  * fiber.alive? -> true or false
2145  *
2146  * Returns true if the fiber can still be resumed (or transferred
2147  * to). After finishing execution of the fiber block this method will
2148  * always return false. You need to <code>require 'fiber'</code>
2149  * before using this method.
2150  */
2151 VALUE
2152 rb_fiber_alive_p(VALUE fiber_value)
2153 {
2154  return FIBER_TERMINATED_P(fiber_ptr(fiber_value)) ? Qfalse : Qtrue;
2155 }
2156 
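// A small sketch (hypothetical helper, not part of cont.c) of guarding a
// resume with the liveness check above to avoid the "dead fiber called"
// FiberError raised by fiber_switch:
#include <ruby.h>

static VALUE
example_safe_resume(VALUE fiber)
{
    // rb_fiber_alive_p returns Qtrue until the fiber has terminated.
    if (RTEST(rb_fiber_alive_p(fiber))) {
        return rb_fiber_resume(fiber, 0, NULL);
    }
    return Qnil;
}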
2157 /*
2158  * call-seq:
2159  * fiber.resume(args, ...) -> obj
2160  *
2161  * Resumes the fiber from the point at which the last Fiber.yield was
2162  * called, or starts running it if it is the first call to
2163  * #resume. Arguments passed to resume will be the value of the
2164  * Fiber.yield expression or will be passed as block parameters to
2165  * the fiber's block if this is the first #resume.
2166  *
2167  * Alternatively, when resume is called it evaluates to the arguments passed
2168  * to the next Fiber.yield statement inside the fiber's block
2169  * or to the block value if it runs to completion without any
2170  * Fiber.yield.
2171  */
2172 static VALUE
2173 rb_fiber_m_resume(int argc, VALUE *argv, VALUE fiber)
2174 {
2175  return rb_fiber_resume_kw(fiber, argc, argv, PASS_KW_SPLAT);
2176 }
2177 
2178 /*
2179  * call-seq:
2180  * fiber.raise -> obj
2181  * fiber.raise(string) -> obj
2182  * fiber.raise(exception [, string [, array]]) -> obj
2183  *
2184  * Raises an exception in the fiber at the point at which the last
2185  * Fiber.yield was called, or at the start if neither +resume+
2186  * nor +raise+ has been called before.
2187  *
2188  * With no arguments, raises a +RuntimeError+. With a single +String+
2189  * argument, raises a +RuntimeError+ with the string as a message. Otherwise,
2190  * the first parameter should be the name of an +Exception+ class (or an
2191  * object that returns an +Exception+ object when sent an +exception+
2192  * message). The optional second parameter sets the message associated with
2193  * the exception, and the third parameter is an array of callback information.
2194  * Exceptions are caught by the +rescue+ clause of <code>begin...end</code>
2195  * blocks.
2196  */
2197 static VALUE
2198 rb_fiber_raise(int argc, VALUE *argv, VALUE fiber)
2199 {
2200  VALUE exc = rb_make_exception(argc, argv);
2201  return rb_fiber_resume_kw(fiber, -1, &exc, RB_NO_KEYWORDS);
2202 }
2203 
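// rb_fiber_raise is static, so C code reaches this behavior through the
// Ruby-level method. A hedged sketch using rb_funcall; the helper name is
// hypothetical:
#include <ruby.h>

static VALUE
example_raise_in_fiber(VALUE fiber)
{
    // Single String argument: the fiber sees a RuntimeError with this message
    // at its last Fiber.yield, per the call-seq documentation above.
    return rb_funcall(fiber, rb_intern("raise"), 1, rb_str_new_cstr("stop"));
}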
2204 /*
2205  * call-seq:
2206  * fiber.transfer(args, ...) -> obj
2207  *
2208  * Transfer control to another fiber, resuming it from where it last
2209  * stopped or starting it if it was not resumed before. The calling
2210  * fiber will be suspended much like in a call to
2211  * Fiber.yield. You need to <code>require 'fiber'</code>
2212  * before using this method.
2213  *
2214  * The fiber which receives the transfer call treats it much like
2215  * a resume call. Arguments passed to transfer are treated like those
2216  * passed to resume.
2217  *
2218  * You cannot call +resume+ on a fiber that has been transferred to.
2219  * If you call +transfer+ on a fiber, and later call +resume+ on the
2220  * fiber, a +FiberError+ will be raised. Once you call +transfer+ on
2221  * a fiber, the only way to resume processing the fiber is to
2222  * call +transfer+ on it again.
2223  *
2224  * Example:
2225  *
2226  * fiber1 = Fiber.new do
2227  * puts "In Fiber 1"
2228  * Fiber.yield
2229  * puts "In Fiber 1 again"
2230  * end
2231  *
2232  * fiber2 = Fiber.new do
2233  * puts "In Fiber 2"
2234  * fiber1.transfer
2235  * puts "Never see this message"
2236  * end
2237  *
2238  * fiber3 = Fiber.new do
2239  * puts "In Fiber 3"
2240  * end
2241  *
2242  * fiber2.resume
2243  * fiber3.resume
2244  * fiber1.resume rescue (p $!)
2245  * fiber1.transfer
2246  *
2247  * <em>produces</em>
2248  *
2249  * In Fiber 2
2250  * In Fiber 1
2251  * In Fiber 3
2252  * #<FiberError: cannot resume transferred Fiber>
2253  * In Fiber 1 again
2254  *
2255  */
2256 static VALUE
2257 rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fiber_value)
2258 {
2259  rb_fiber_t *fiber = fiber_ptr(fiber_value);
2260  fiber->transferred = 1;
2261  return fiber_switch(fiber, argc, argv, 0, PASS_KW_SPLAT);
2262 }
2263 
2264 /*
2265  * call-seq:
2266  * Fiber.yield(args, ...) -> obj
2267  *
2268  * Yields control back to the context that resumed the fiber, passing
2269  * along any arguments that were passed to it. The fiber will resume
2270  * processing at this point when #resume is called next.
2271  * Any arguments passed to the next #resume will be the value that
2272  * this Fiber.yield expression evaluates to.
2273  */
2274 static VALUE
2275 rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
2276 {
2277  return rb_fiber_yield_kw(argc, argv, PASS_KW_SPLAT);
2278 }
2279 
2280 /*
2281  * call-seq:
2282  * Fiber.current() -> fiber
2283  *
2284  * Returns the current fiber. You need to <code>require 'fiber'</code>
2285  * before using this method. If you are not running in the context of
2286  * a fiber this method will return the root fiber.
2287  */
2288 static VALUE
2289 rb_fiber_s_current(VALUE klass)
2290 {
2291  return rb_fiber_current();
2292 }
2293 
2294 /*
2295  * call-seq:
2296  * fiber.to_s -> string
2297  *
2298  * Returns a fiber information string.
2299  *
2300  */
2301 
2302 static VALUE
2303 fiber_to_s(VALUE fiber_value)
2304 {
2305  const rb_fiber_t *fiber = fiber_ptr(fiber_value);
2306  const rb_proc_t *proc;
2307  char status_info[0x20];
2308 
2309  if (fiber->transferred) {
2310  snprintf(status_info, 0x20, " (%s, transferred)", fiber_status_name(fiber->status));
2311  }
2312  else {
2313  snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));
2314  }
2315 
2316  if (!rb_obj_is_proc(fiber->first_proc)) {
2317  VALUE str = rb_any_to_s(fiber_value);
2318  strlcat(status_info, ">", sizeof(status_info));
2319  rb_str_set_len(str, RSTRING_LEN(str)-1);
2320  rb_str_cat_cstr(str, status_info);
2321  return str;
2322  }
2323  GetProcPtr(fiber->first_proc, proc);
2324  return rb_block_to_s(fiber_value, &proc->block, status_info);
2325 }
2326 
2327 #ifdef HAVE_WORKING_FORK
2328 void
2329 rb_fiber_atfork(rb_thread_t *th)
2330 {
2331  if (th->root_fiber) {
2332  if (&th->root_fiber->cont.saved_ec != th->ec) {
2333  th->root_fiber = th->ec->fiber_ptr;
2334  }
2335  th->root_fiber->prev = 0;
2336  }
2337 }
2338 #endif
2339 
2340 #ifdef RB_EXPERIMENTAL_FIBER_POOL
2341 static void
2342 fiber_pool_free(void *ptr)
2343 {
2344  struct fiber_pool * fiber_pool = ptr;
2345  RUBY_FREE_ENTER("fiber_pool");
2346 
2347  fiber_pool_free_allocations(fiber_pool->allocations);
2348  ruby_xfree(fiber_pool);
2349 
2350  RUBY_FREE_LEAVE("fiber_pool");
2351 }
2352 
2353 static size_t
2354 fiber_pool_memsize(const void *ptr)
2355 {
2356  const struct fiber_pool * fiber_pool = ptr;
2357  size_t size = sizeof(*fiber_pool);
2358 
2359  size += fiber_pool->count * fiber_pool->size;
2360 
2361  return size;
2362 }
2363 
2364 static const rb_data_type_t FiberPoolDataType = {
2365  "fiber_pool",
2366  {NULL, fiber_pool_free, fiber_pool_memsize,},
2367  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
2368 };
2369 
2370 static VALUE
2371 fiber_pool_alloc(VALUE klass)
2372 {
2373  struct fiber_pool * fiber_pool = RB_ALLOC(struct fiber_pool);
2374 
2375  return TypedData_Wrap_Struct(klass, &FiberPoolDataType, fiber_pool);
2376 }
2377 
2378 static VALUE
2379 rb_fiber_pool_initialize(int argc, VALUE* argv, VALUE self)
2380 {
2381  rb_thread_t *th = GET_THREAD();
2382  VALUE size = Qnil, count = Qnil, vm_stack_size = Qnil;
2383  struct fiber_pool * fiber_pool = NULL;
2384 
2385  // Maybe these should be keyword arguments.
2386  rb_scan_args(argc, argv, "03", &size, &count, &vm_stack_size);
2387 
2388  if (NIL_P(size)) {
2389  size = INT2NUM(th->vm->default_params.fiber_machine_stack_size);
2390  }
2391 
2392  if (NIL_P(count)) {
2393  count = INT2NUM(128);
2394  }
2395 
2396  if (NIL_P(vm_stack_size)) {
2397  vm_stack_size = INT2NUM(th->vm->default_params.fiber_vm_stack_size);
2398  }
2399 
2400  TypedData_Get_Struct(self, struct fiber_pool, &FiberPoolDataType, fiber_pool);
2401 
2402  fiber_pool_initialize(fiber_pool, NUM2SIZET(size), NUM2SIZET(count), NUM2SIZET(vm_stack_size));
2403 
2404  return self;
2405 }
2406 #endif
2407 
2408 /*
2409  * Document-class: FiberError
2410  *
2411  * Raised when an invalid operation is attempted on a Fiber, in
2412  * particular when attempting to call/resume a dead fiber,
2413  * attempting to yield from the root fiber, or calling a fiber across
2414  * threads.
2415  *
2416  * fiber = Fiber.new{}
2417  * fiber.resume #=> nil
2418  * fiber.resume #=> FiberError: dead fiber called
2419  */
2420 
2421 void
2422 Init_Cont(void)
2423 {
2424  rb_thread_t *th = GET_THREAD();
2425  size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
2426  size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
2427  size_t stack_size = machine_stack_size + vm_stack_size;
2428 
2429 #ifdef _WIN32
2430  SYSTEM_INFO info;
2431  GetSystemInfo(&info);
2432  pagesize = info.dwPageSize;
2433 #else /* not WIN32 */
2434  pagesize = sysconf(_SC_PAGESIZE);
2435 #endif
2436  SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
2437 
2438  fiber_pool_initialize(&shared_fiber_pool, stack_size, FIBER_POOL_INITIAL_SIZE, vm_stack_size);
2439 
2440  char * fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
2441  if (fiber_shared_fiber_pool_free_stacks) {
2442  shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
2443  }
2444 
2445  rb_cFiber = rb_define_class("Fiber", rb_cObject);
2446  rb_define_alloc_func(rb_cFiber, fiber_alloc);
2447  rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
2448  rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
2449  rb_define_method(rb_cFiber, "initialize", rb_fiber_initialize, -1);
2450  rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
2451  rb_define_method(rb_cFiber, "raise", rb_fiber_raise, -1);
2452  rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
2453  rb_define_alias(rb_cFiber, "inspect", "to_s");
2454 
2455 #ifdef RB_EXPERIMENTAL_FIBER_POOL
2456  rb_cFiberPool = rb_define_class("Pool", rb_cFiber);
2457  rb_define_alloc_func(rb_cFiberPool, fiber_pool_alloc);
2458  rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);
2459 #endif
2460 }
2461 
2462 RUBY_SYMBOL_EXPORT_BEGIN
2463 
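// Entry point used by the 'continuation' extension: callcc and Continuation
// are only set up once `require 'continuation'` runs.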
2464 void
2465 ruby_Init_Continuation_body(void)
2466 {
2467  rb_cContinuation = rb_define_class("Continuation", rb_cObject);
2468  rb_undef_alloc_func(rb_cContinuation);
2469  rb_undef_method(CLASS_OF(rb_cContinuation), "new");
2470  rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
2471  rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
2472  rb_define_global_function("callcc", rb_callcc, 0);
2473 }
2474 
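// Entry point used by the 'fiber' extension: Fiber#transfer, Fiber#alive?
// and Fiber.current become available once `require 'fiber'` runs, as the
// method documentation above notes.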
2475 void
2476 ruby_Init_Fiber_as_Coroutine(void)
2477 {
2478  rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
2479  rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
2480  rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
2481 }
2482 
2483 RUBY_SYMBOL_EXPORT_END
Definition: proc.c:837