[PATCH] sched: hardcode non-smp set_cpus_allowed
diff --git a/kernel/exit.c b/kernel/exit.c
index 5b0fb9f..2d39ccc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -368,17 +368,25 @@ EXPORT_SYMBOL(daemonize);
 static inline void close_files(struct files_struct * files)
 {
        int i, j;
+       struct fdtable *fdt;
 
        j = 0;
+
+       /*
+        * It is safe to dereference the fd table without RCU or
+        * ->file_lock because this is the last reference to the
+        * files structure.
+        */
+       fdt = files_fdtable(files);
        for (;;) {
                unsigned long set;
                i = j * __NFDBITS;
-               if (i >= files->max_fdset || i >= files->max_fds)
+               if (i >= fdt->max_fdset || i >= fdt->max_fds)
                        break;
-               set = files->open_fds->fds_bits[j++];
+               set = fdt->open_fds->fds_bits[j++];
                while (set) {
                        if (set & 1) {
-                               struct file * file = xchg(&files->fd[i], NULL);
+                               struct file * file = xchg(&fdt->fd[i], NULL);
                                if (file)
                                        filp_close(file, files);
                        }
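
The loop above walks the open-fds bitmap one long word at a time. For
readers unfamiliar with the idiom, here is a minimal standalone C sketch
of the same scan (plain arrays instead of fdt->open_fds->fds_bits,
nothing kernel-specific):

	#include <stdio.h>

	#define NFDBITS (8 * sizeof(unsigned long))

	int main(void)
	{
		/* fds 1 and 3 open, plus the first fd of the second word */
		unsigned long fds_bits[2] = { (1UL << 1) | (1UL << 3), 1UL };
		unsigned long max_fds = 2 * NFDBITS;
		unsigned long i, j = 0;

		for (;;) {
			unsigned long set;

			i = j * NFDBITS;
			if (i >= max_fds)
				break;
			set = fds_bits[j++];
			while (set) {
				if (set & 1)
					printf("would close fd %lu\n", i);
				i++;
				set >>= 1;
			}
		}
		return 0;
	}
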
@@ -403,18 +411,22 @@ struct files_struct *get_files_struct(struct task_struct *task)
 
 void fastcall put_files_struct(struct files_struct *files)
 {
+       struct fdtable *fdt;
+
        if (atomic_dec_and_test(&files->count)) {
                close_files(files);
                /*
                 * Free the fd and fdset arrays if we expanded them.
+                * If the fdtable was embedded, pass files for freeing
+                * at the end of the RCU grace period. Otherwise,
+                * you can free files immediately.
                 */
-               if (files->fd != &files->fd_array[0])
-                       free_fd_array(files->fd, files->max_fds);
-               if (files->max_fdset > __FD_SETSIZE) {
-                       free_fdset(files->open_fds, files->max_fdset);
-                       free_fdset(files->close_on_exec, files->max_fdset);
-               }
-               kmem_cache_free(files_cachep, files);
+               fdt = files_fdtable(files);
+               if (fdt == &files->fdtab)
+                       fdt->free_files = files;
+               else
+                       kmem_cache_free(files_cachep, files);
+               free_fdtable(fdt);
        }
 }
 
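
The new ownership rule deserves spelling out: free_fdtable() defers the
actual freeing to the end of the RCU grace period, so when the fdtable is
the one embedded in files_struct, the whole files_struct has to ride
along (via fdt->free_files); only a separately allocated table lets
files be freed on the spot. A standalone sketch of that decision, with
simplified stand-in types and the RCU deferral replaced by an immediate
free for the sake of the demo:

	#include <stdlib.h>

	/* hypothetical, simplified stand-ins for the kernel structures */
	struct fdtable {
		struct files_struct *free_files;  /* owner to release with the table */
	};

	struct files_struct {
		struct fdtable fdtab;             /* embedded table */
		struct fdtable *fdt;              /* &fdtab, or a reallocated copy */
	};

	/* stand-in for free_fdtable(): the kernel defers this through an
	 * RCU callback; here it runs at once just to show ownership */
	static void free_fdtable_demo(struct fdtable *fdt)
	{
		if (fdt->free_files)
			free(fdt->free_files);    /* embedded: freeing the owner frees fdt */
		else
			free(fdt);                /* separately allocated table */
	}

	static void put_files_demo(struct files_struct *files)
	{
		struct fdtable *fdt = files->fdt;

		if (fdt == &files->fdtab)
			fdt->free_files = files;  /* keep files alive until fdt dies */
		else
			free(files);              /* table is separate; files can go now */
		free_fdtable_demo(fdt);
	}

	int main(void)
	{
		struct files_struct *f = calloc(1, sizeof(*f));

		f->fdt = &f->fdtab;               /* embedded case */
		put_files_demo(f);
		return 0;
	}
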
@@ -771,10 +783,6 @@ static void exit_notify(struct task_struct *tsk)
        /* If the process is dead, release it - nobody will wait for it */
        if (state == EXIT_DEAD)
                release_task(tsk);
-
-       /* PF_DEAD causes final put_task_struct after we schedule. */
-       preempt_disable();
-       tsk->flags |= PF_DEAD;
 }
 
 fastcall NORET_TYPE void do_exit(long code)
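
This removal pairs with the hunk near the end of do_exit() below, where
PF_DEAD is now set immediately before the final schedule(); see the note
and sketch after that hunk.
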
@@ -827,10 +835,14 @@ fastcall NORET_TYPE void do_exit(long code)
                                preempt_count());
 
        acct_update_integrals(tsk);
-       update_mem_hiwater(tsk);
+       if (tsk->mm) {
+               update_hiwater_rss(tsk->mm);
+               update_hiwater_vm(tsk->mm);
+       }
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                del_timer_sync(&tsk->signal->real_timer);
+               exit_itimers(tsk->signal);
                acct_process(code);
        }
        exit_mm(tsk);
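
Two independent fixes land here: group-dead teardown now also flushes
POSIX interval timers via exit_itimers(), and the old
update_mem_hiwater() is split into per-counter high-watermark updates
guarded by tsk->mm, which is NULL for kernel threads. The watermark
helpers amount to simple ratchets; a standalone sketch of the idiom
(simplified fields, not the kernel macros):

	#include <stdio.h>

	struct mm_counters {
		unsigned long rss, total_vm;           /* current usage */
		unsigned long hiwater_rss, hiwater_vm; /* peaks seen so far */
	};

	/* the watermark only ever ratchets upward */
	static void update_hiwater(struct mm_counters *mm)
	{
		if (mm->hiwater_rss < mm->rss)
			mm->hiwater_rss = mm->rss;
		if (mm->hiwater_vm < mm->total_vm)
			mm->hiwater_vm = mm->total_vm;
	}

	int main(void)
	{
		struct mm_counters mm = { .rss = 120, .total_vm = 300 };

		update_hiwater(&mm);
		mm.rss = 80;                  /* usage dropped... */
		update_hiwater(&mm);          /* ...peak is retained */
		printf("hiwater_rss=%lu hiwater_vm=%lu\n",
		       mm.hiwater_rss, mm.hiwater_vm);  /* 120 300 */
		return 0;
	}
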
@@ -857,7 +869,11 @@ fastcall NORET_TYPE void do_exit(long code)
        tsk->mempolicy = NULL;
 #endif
 
-       BUG_ON(!(current->flags & PF_DEAD));
+       /* PF_DEAD causes final put_task_struct after we schedule. */
+       preempt_disable();
+       BUG_ON(tsk->flags & PF_DEAD);
+       tsk->flags |= PF_DEAD;
+
        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
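
Together with the earlier hunk that drops the flag from exit_notify(),
this moves the PF_DEAD publication to the last possible moment:
preemption is disabled, the flag is set, and the task schedules away for
good, after which the scheduler side (finish_task_switch() in this era)
drops the final task reference. Setting the flag back in exit_notify()
left a window in which the task could be freed while do_exit() was still
running. A userspace analogy of the "publish last, free only after the
exit path has left" rule (pthreads standing in for the scheduler;
hypothetical names):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct task {
		atomic_int dead;        /* analogous to PF_DEAD */
		long exit_code;         /* state the exit path still writes */
	};

	static void *exit_path(void *arg)
	{
		struct task *tsk = arg;

		tsk->exit_code = 42;    /* ...all remaining teardown work... */
		/* Publish the mark only as the final act before
		 * "scheduling away"; setting it earlier (as exit_notify()
		 * used to) opens a window where the reaper frees tsk
		 * under our feet. */
		atomic_store_explicit(&tsk->dead, 1, memory_order_release);
		return NULL;
	}

	int main(void)
	{
		struct task *tsk = calloc(1, sizeof(*tsk));
		pthread_t t;

		pthread_create(&t, NULL, exit_path, tsk);
		while (!atomic_load_explicit(&tsk->dead, memory_order_acquire))
			;               /* wait for the mark... */
		pthread_join(t, NULL);  /* ...and for the thread to be gone,
					 * the role finish_task_switch()
					 * plays in the kernel */
		free(tsk);
		return 0;
	}
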
@@ -1191,7 +1207,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
 
                exit_code = p->exit_code;
                if (unlikely(!exit_code) ||
-                   unlikely(p->state > TASK_STOPPED))
+                   unlikely(p->state & TASK_TRACED))
                        goto bail_ref;
                return wait_noreap_copyout(p, pid, uid,
                                           why, (exit_code << 8) | 0x7f,
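
Task states are bit flags, so the old "p->state > TASK_STOPPED" test
only worked because TASK_TRACED happens to carry a larger value; the
replacement asks the intended question directly. A toy illustration
using the 2.6-era state values (reproduced here for the demo):

	#include <stdio.h>

	#define TASK_RUNNING		0
	#define TASK_INTERRUPTIBLE	1
	#define TASK_UNINTERRUPTIBLE	2
	#define TASK_STOPPED		4
	#define TASK_TRACED		8

	int main(void)
	{
		long state = TASK_TRACED;

		/* fragile: leans on TRACED sorting above STOPPED */
		printf("by ordering: %d\n", state > TASK_STOPPED);
		/* explicit: the question wait_task_stopped() means */
		printf("by flag:     %d\n", !!(state & TASK_TRACED));
		return 0;
	}
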
@@ -1367,6 +1383,15 @@ repeat:
 
                        switch (p->state) {
                        case TASK_TRACED:
+                               /*
+                                * When we hit the race with PTRACE_ATTACH,
+                                * we will not report this child.  But the
+                                * race means it has not yet been moved to
+                                * our ptrace_children list, so we need to
+                                * set the flag here to avoid a spurious ECHILD
+                                * when the race happens with the only child.
+                                */
+                               flag = 1;
                                if (!my_ptrace_child(p))
                                        continue;
                                /*FALLTHROUGH*/
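
The flag variable referenced in the new comment is do_wait()'s "saw at
least one eligible child" marker: if it stays clear across the whole
scan, the syscall returns ECHILD instead of blocking. A simplified
sketch of that bookkeeping (illustrative states, not the kernel's):

	#include <stdio.h>

	enum child_state { RUNNING, STOPPED, TRACED_BY_SOMEONE_ELSE };

	static int scan_children(const enum child_state *kids, int n)
	{
		int flag = 0, i;

		for (i = 0; i < n; i++) {
			switch (kids[i]) {
			case TRACED_BY_SOMEONE_ELSE:
				flag = 1;   /* eligible, just not reportable yet */
				continue;
			case STOPPED:
				return 1;   /* reportable right now */
			case RUNNING:
				flag = 1;
				continue;
			}
		}
		return flag ? 0 /* block and retry */ : -1 /* ECHILD */;
	}

	int main(void)
	{
		enum child_state kids[] = { TRACED_BY_SOMEONE_ELSE };

		printf("%d\n", scan_children(kids, 1)); /* 0: block, not ECHILD */
		return 0;
	}
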