The XNU kernel is part of the Darwin operating system for use in the macOS and iOS operating systems. XNU is an acronym for X is Not Unix. XNU is a hybrid kernel combining the Mach kernel developed at Carnegie Mellon University with components from FreeBSD and a C++ API for writing drivers called IOKit. XNU runs on x86_64 for both single-processor and multi-processor configurations.
/*
 * vfork
 *
 * Description:	vfork system call
 *
 * Parameters:	void			[no arguments]
 *
 * Retval:	0			(to child process)
 *		!0			pid of child (to parent process)
 *		-1			error (see "Returns:")
 *
 * Returns:	EAGAIN			Administrative limit reached
 *		EINVAL			vfork() called during vfork()
 *		ENOMEM			Failed to allocate new process
 *
 * Note:	After a successful call to this function, the parent process
 *		has its task, thread, and uthread lent to the child process,
 *		and control is returned to the caller; if this function is
 *		invoked as a system call, the return is to user space, and
 *		is effectively running on the child process.
 *
 *		Subsequent calls that operate on process state are permitted,
 *		though discouraged, and will operate on the child process; any
 *		operations on the task, thread, or uthread will result in
 *		changes in the parent state, and, if inheritable, the child
 *		state, when a task, thread, and uthread are realized for the
 *		child process at execve() time, will also be effected.  Given
 *		this, it's recommended that people use the posix_spawn() call
 *		instead.
 *
 * BLOCK DIAGRAM OF VFORK
 *
 * Before:
 *
 *     ,----------------.         ,-------------.
 *     |                |  task   |             |
 *     | parent_thread  | ------> | parent_task |
 *     |                | <.list. |             |
 *     `----------------'         `-------------'
 *    uthread |  ^             bsd_info |  ^
 *            v  | vc_thread            v  | task
 *     ,----------------.         ,-------------.
 *     |                |         |             |
 *     | parent_uthread | <.list. | parent_proc | <-- current_proc()
 *     |                |         |             |
 *     `----------------'         `-------------'
 *    uu_proc |
 *            v
 *           NULL
 *
 * After:
 *
 *          ,----------------.         ,-------------.
 *          |                |  task   |             |
 *    ,---> | parent_thread  | ------> | parent_task |
 *    |     |                | <.list. |             |
 *    |     `----------------'         `-------------'
 *    |    uthread |  ^             bsd_info |  ^
 *    |            v  | vc_thread            v  | task
 *    |     ,----------------.         ,-------------.
 *    |     |                |         |             |
 *    |     | parent_uthread | <.list. | parent_proc |
 *    |     |                |         |             |
 *    |     `----------------'         `-------------'
 *    |    uu_proc |               . list
 *    |            v               v
 *    |     ,----------------.
 *    `---- |                |
 * p_vforkact |  child_proc  | <-- current_proc()
 *          |                |
 *          `----------------'
 */
/*
 * NOTE(review): this is the interior of a wait-queue thread-selection
 * routine (XNU waitq layer).  The enclosing function's signature and the
 * declarations of `args`, `waitq`, `safeq`, `eventmask`, `spl`,
 * `nthreads`, `max_threads`, `first_thread`, and `remaining_eventmask`
 * are outside this view; the thread-scan loop that would normally sit
 * between the eventmask check and the eventmask update also appears to
 * have been elided by extraction — confirm against the full file.
 *
 * Overall flow visible here:
 *   1. Resolve the "safe" queue (a global IRQ-safe queue for non-IRQ-safe
 *      waitqs) and lock it, raising spl on first entry.
 *   2. Scan/update that queue's cached eventmask.
 *   3. If nothing was selected, pull the single first_thread off the queue.
 *   4. Unlock/restore, then recurse one level into any wait queue sets
 *      this waitq belongs to via walk_waitq_links().
 */
if (!waitq_irq_safe(waitq)) {
	/* JMM - add flag to waitq to avoid global lookup if no waiters */
	eventmask = _CAST_TO_EVENT_MASK(waitq);
	safeq = waitq_get_safeq(waitq);
	/*
	 * Raise IPL only on the first pass (*nthreads == 0); the matching
	 * splx() below is likewise guarded on *nthreads == 0, and a
	 * selected thread instead hands spl back through *(args->spl).
	 */
	if (*nthreads == 0)
		spl = splsched();
	waitq_lock(safeq);
} else {
	/* IRQ-safe waitqs are their own safe queue; key the mask off the event */
	eventmask = _CAST_TO_EVENT_MASK(args->event);
	safeq = waitq;
}

/*
 * If the safeq doesn't have an eventmask (not global) or the event
 * we're looking for IS set in its eventmask, then scan the threads
 * in that queue for ones that match the original <waitq,event> pair.
 */
if (!waitq_is_global(safeq) ||
    (safeq->waitq_eventmask & eventmask) == eventmask) {
	/*
	 * Update the eventmask of global queues we just scanned:
	 * - If we selected all the threads in the queue, we can clear its
	 *   eventmask.
	 *
	 * - If we didn't find enough threads to fill our needs, then we can
	 *   assume we looked at every thread in the queue and the mask we
	 *   computed is complete - so reset it.
	 */
	if (waitq_is_global(safeq)) {
		if (waitq_empty(safeq))
			safeq->waitq_eventmask = 0;
		else if (max_threads < 0 || *nthreads < max_threads)
			safeq->waitq_eventmask = remaining_eventmask;
	}
}

/*
 * Grab the first thread in the queue if no other thread was selected.
 * We can guarantee that no one has manipulated this thread because
 * it's waiting on the given waitq, and we have that waitq locked.
 */
if (*nthreads == 0 && first_thread != THREAD_NULL && args->threadq) {
	/* we know this is the first (and only) thread */
	++(*nthreads);
	/* hand the IPL back to the caller: reuse spl taken above, or raise now */
	*(args->spl) = (safeq != waitq) ? spl : splsched();
	thread_lock(first_thread);
	thread_clear_waitq_state(first_thread);
	waitq_thread_remove(safeq, first_thread);
	enqueue_tail(args->threadq, &(first_thread->wait_links));

	/* update the eventmask on [now] empty global queues */
	if (waitq_is_global(safeq) && waitq_empty(safeq))
		safeq->waitq_eventmask = 0;
}

/* unlock the safe queue if we locked one above */
if (safeq != waitq) {
	waitq_unlock(safeq);
	/* no thread selected => no one will restore the IPL; do it here */
	if (*nthreads == 0)
		splx(spl);
}

/* quota satisfied: stop before recursing into wait queue sets */
if (max_threads > 0 && *nthreads >= max_threads)
	return;

/*
 * wait queues that are not in any sets
 * are the bottom of the recursion
 */
if (!waitq->waitq_set_id)
	return;

/* check to see if the set ID for this wait queue is valid */
struct waitq_link *link = wql_get_link(waitq->waitq_set_id);
if (!link) {
	/* the waitq set to which this waitq belonged, has been invalidated */
	waitq->waitq_set_id = 0;
	return;
}

/* only needed the link to validate the set ID; drop the reference now */
wql_put_link(link);

/*
 * If this waitq is a member of any wait queue sets, we need to look
 * for waiting thread(s) in any of those sets, and prepost all sets that
 * don't have active waiters.
 *
 * Note that we do a local walk of this waitq's links - we manually
 * recurse down wait queue set's with non-zero wqset_q.waitq_set_id
 */
(void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
    WQL_WQS, (void *)args, waitq_select_walk_cb);
}
/*
 * run_queue: scheduler run queue — one FIFO queue head per priority
 * level (NRQS levels), with a bitmap of non-empty levels and a cached
 * highest-runnable priority for O(1) selection.
 *
 * NOTE(review): the definition continues past this view; the closing
 * brace (and any trailing members) are not visible here.
 */
struct run_queue {
	int		highq;				/* highest runnable queue */
	bitmap_t	bitmap[BITMAP_LEN(NRQS)];	/* run queue bitmap array */
	int		count;				/* # of threads total */
	int		urgency;			/* level of preemption urgency */
	queue_head_t	queues[NRQS];			/* one for each priority */