Linux-Process

This page is more about kernel call flow for various operations, like Fork, Schedule and other Process related things.

Each process has its own private per-process data, such as its page tables, task_struct, mm_struct, and kernel stack.
All processes share the same physical memory for kernel code and kernel data.

Process VM
user stack
%esp ->
mmap region
shared
brk ->
heap
uninitialized data (.bss)
initialized data (.data)
code (.text)



Fork.c

unsigned long total_forks; /* Handle normal Linux uptimes. */
int nr_threads; /* The idle threads do not count.. */
int max_threads; /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

fork_init(unsigned long mempages)
1. create a slab on which task_structs can be allocated
    struct kmem_cache *task_struct_cachep=kmem_cache_create("task_struct", sizeof(struct task_struct), ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);

2. max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
    Make max_threads at least 20.

3. Update the init_task (init process) descriptor's rlimit array.
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC];

long do_fork(unsigned long clone_flags, unsigned long stack_start,  struct pt_regs *regs,
   unsigned long stack_size,       int __user *parent_tidptr,       int __user *child_tidptr)
1. Do some parameter validation

2. When called from kernel_thread, don't do user tracing stuff

3. Copy process
struct task_struct *p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
4. If flags had CLONE_VFORK, init_completion(&vfork) 

5a. If flags had CLONE_STOPPED, start with immediate SIGSTOP
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
__set_task_state(p, TASK_STOPPED);

5b. Else, Wakeup New Thread

copy_process(): copies the registers and all the appropriate parts of the process environment (as dictated by the clone flags).
1. Thread groups must share signals
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))  
    return ERR_PTR(-EINVAL);

2.Shared signal handlers imply shared VM. Clubbed with 1, thread groups also imply shared VM
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
            return ERR_PTR(-EINVAL);

3.Siblings of global init remain as zombies on exit since they are not reaped by parent(reaper) , prevent global and container-inits from creating siblings
if ((clone_flags & CLONE_PARENT) && current->signal->flags & SIGNAL_UNKILLABLE)
return ERR_PTR(-EINVAL);

4.  Duplicate task struct
struct task_struct *p = dup_task_struct(current);

5. Init RT Mutex for this task
rt_mutex_init_task(p);
       spin_lock_init(&p->pi_lock);
       #ifdef CONFIG_RT_MUTEXES
    plist_head_init(&p->pi_waiters, &p->pi_lock);
    p->pi_blocked_on = NULL;
       #endif

6. Do other copies,
        copy_flags(clone_flags, p);
        rcu_copy_process(p);
        spin_lock_init(&p->alloc_lock);
        init_sigpending(&p->pending);
        Init task_struct time variables, like utime, prev_utime,  etc.
        posix_cpu_timers_init(p);
        sched_fork(p, clone_flags); //Perform scheduler related setup. Assign this task to a CPU. 

What happens when schedule() is called?
// need_resched path (the label in schedule() that the preemptible kernel re-enters):
Disable preemption, get cpu_id, find runqueue(rq), store rq->curr as prev.
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_sched_qs(cpu);
prev = rq->curr;
switch_count = &prev->nivcsw;

release_kernel_lock(prev);

No comments:

Post a Comment