Merge master.kernel.org:/home/rmk/linux-2.6-arm
author     Linus Torvalds <torvalds@woody.linux-foundation.org>
           Mon, 1 Oct 2007 20:08:22 +0000 (13:08 -0700)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
           Mon, 1 Oct 2007 20:08:22 +0000 (13:08 -0700)
* master.kernel.org:/home/rmk/linux-2.6-arm:
  [ARM] Resolve PCI section warnings

Documentation/devices.txt
drivers/char/vt_ioctl.c
drivers/media/video/ivtv/ivtv-fileops.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_trans_buf.c
kernel/futex.c
kernel/futex_compat.c
kernel/sys.c
mm/hugetlb.c

diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 8de132a..6c46730 100644
@@ -94,6 +94,8 @@ Your cooperation is appreciated.
                  9 = /dev/urandom      Faster, less secure random number gen.
                 10 = /dev/aio          Asynchronous I/O notification interface
                 11 = /dev/kmsg         Writes to this come out as printk's
+                12 = /dev/oldmem       Used by crashdump kernels to access
+                                       the memory of the kernel that crashed.
 
   1 block      RAM disk
                  0 = /dev/ram0         First RAM disk
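
[Editor's note] The new char device 12, /dev/oldmem, is what a kdump capture kernel uses to read the physical memory of the kernel that crashed; the device is addressed by physical address. Purely as an illustration — not part of this commit — a userspace reader could look like the sketch below. The device path is real, but the page size and the physical address passed on the command line are assumptions for the example.

/* Hypothetical sketch: dump one page of the crashed kernel's memory
 * via /dev/oldmem.  The physical address is taken from argv[1]. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        unsigned long long phys = (argc > 1) ? strtoull(argv[1], NULL, 0) : 0;
        char buf[4096];
        ssize_t n;
        int fd = open("/dev/oldmem", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/oldmem");
                return 1;
        }
        /* /dev/oldmem is indexed by physical address: seek, then read. */
        if (lseek(fd, (off_t)phys, SEEK_SET) == (off_t)-1) {
                perror("lseek");
                close(fd);
                return 1;
        }
        n = read(fd, buf, sizeof(buf));
        if (n < 0) {
                perror("read");
                close(fd);
                return 1;
        }
        fwrite(buf, 1, (size_t)n, stdout);
        close(fd);
        return 0;
}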
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 045e688..c799b7f 100644
@@ -770,6 +770,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                /*
                 * Switching-from response
                 */
+               acquire_console_sem();
                if (vc->vt_newvt >= 0) {
                        if (arg == 0)
                                /*
@@ -784,7 +785,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                                 * complete the switch.
                                 */
                                int newvt;
-                               acquire_console_sem();
                                newvt = vc->vt_newvt;
                                vc->vt_newvt = -1;
                                i = vc_allocate(newvt);
@@ -798,7 +798,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                                 * other console switches..
                                 */
                                complete_change_console(vc_cons[newvt].d);
-                               release_console_sem();
                        }
                }
 
@@ -810,9 +809,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        /*
                         * If it's just an ACK, ignore it
                         */
-                       if (arg != VT_ACKACQ)
+                       if (arg != VT_ACKACQ) {
+                               release_console_sem();
                                return -EINVAL;
+                       }
                }
+               release_console_sem();
 
                return 0;
 
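[Editor's note] The vt_ioctl change widens the console_sem critical section so that the vc->vt_newvt check and the VT_ACKACQ handling run under one lock, and the semaphore is released on every exit path, including the new -EINVAL return. As a self-contained analogue of that "check and act under one lock, unlock on all returns" shape only — the names and values below are made up, this is not kernel code — a pthread sketch:

/* Illustrative userspace analogue: take the lock before inspecting the
 * shared state, and release it on every return path, error path included. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_vt = -1;              /* stands in for vc->vt_newvt */

static int ack_switch(int arg)
{
        pthread_mutex_lock(&console_lock);          /* widened scope */
        if (pending_vt >= 0 && arg == 1) {
                int newvt = pending_vt;
                pending_vt = -1;
                printf("completing switch to vt %d\n", newvt);
        } else if (arg != 1) {
                pthread_mutex_unlock(&console_lock); /* error path unlocks too */
                return -EINVAL;
        }
        pthread_mutex_unlock(&console_lock);
        return 0;
}

int main(void)
{
        pending_vt = 3;
        printf("ack_switch(1) = %d\n", ack_switch(1));
        printf("ack_switch(0) = %d\n", ack_switch(0));
        return 0;
}
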
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 0285c4a..66ea3cb 100644
@@ -754,9 +754,11 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
                ivtv_yuv_close(itv);
        }
        if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV)
-           itv->output_mode = OUT_NONE;
+               itv->output_mode = OUT_NONE;
+       else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV)
+               itv->output_mode = OUT_NONE;
        else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG)
-           itv->output_mode = OUT_NONE;
+               itv->output_mode = OUT_NONE;
 
        itv->speed = 0;
        clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index fa25b7d..d7e1361 100644
@@ -52,11 +52,6 @@ typedef struct xfs_buf_log_format_t {
 #define        XFS_BLI_UDQUOT_BUF      0x4
 #define XFS_BLI_PDQUOT_BUF     0x8
 #define        XFS_BLI_GDQUOT_BUF      0x10
-/*
- * This flag indicates that the buffer contains newly allocated
- * inodes.
- */
-#define        XFS_BLI_INODE_NEW_BUF   0x20
 
 #define        XFS_BLI_CHUNK           128
 #define        XFS_BLI_SHIFT           7
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7174991..8ae6e8e 100644
@@ -1874,7 +1874,6 @@ xlog_recover_do_inode_buffer(
 /*ARGSUSED*/
 STATIC void
 xlog_recover_do_reg_buffer(
-       xfs_mount_t             *mp,
        xlog_recover_item_t     *item,
        xfs_buf_t               *bp,
        xfs_buf_log_format_t    *buf_f)
@@ -1885,50 +1884,6 @@ xlog_recover_do_reg_buffer(
        unsigned int            *data_map = NULL;
        unsigned int            map_size = 0;
        int                     error;
-       int                     stale_buf = 1;
-
-       /*
-        * Scan through the on-disk inode buffer and attempt to
-        * determine if it has been written to since it was logged.
-        *
-        * - If any of the magic numbers are incorrect then the buffer is stale
-        * - If any of the modes are non-zero then the buffer is not stale
-        * - If all of the modes are zero and at least one of the generation
-        *   counts is non-zero then the buffer is stale
-        *
-        * If the end result is a stale buffer then the log buffer is replayed
-        * otherwise it is skipped.
-        *
-        * This heuristic is not perfect.  It can be improved by scanning the
-        * entire inode chunk for evidence that any of the inode clusters have
-        * been updated.  To fix this problem completely we will need a major
-        * architectural change to the logging system.
-        */
-       if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) {
-               xfs_dinode_t    *dip;
-               int             inodes_per_buf;
-               int             mode_count = 0;
-               int             gen_count = 0;
-
-               stale_buf = 0;
-               inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
-               for (i = 0; i < inodes_per_buf; i++) {
-                       dip = (xfs_dinode_t *)xfs_buf_offset(bp,
-                               i * mp->m_sb.sb_inodesize);
-                       if (be16_to_cpu(dip->di_core.di_magic) !=
-                                       XFS_DINODE_MAGIC) {
-                               stale_buf = 1;
-                               break;
-                       }
-                       if (dip->di_core.di_mode)
-                               mode_count++;
-                       if (dip->di_core.di_gen)
-                               gen_count++;
-               }
-
-               if (!mode_count && gen_count)
-                       stale_buf = 1;
-       }
 
        switch (buf_f->blf_type) {
        case XFS_LI_BUF:
@@ -1962,7 +1917,7 @@ xlog_recover_do_reg_buffer(
                                               -1, 0, XFS_QMOPT_DOWARN,
                                               "dquot_buf_recover");
                }
-               if (!error && stale_buf)
+               if (!error)
                        memcpy(xfs_buf_offset(bp,
                                (uint)bit << XFS_BLI_SHIFT),    /* dest */
                                item->ri_buf[i].i_addr,         /* source */
@@ -2134,7 +2089,7 @@ xlog_recover_do_dquot_buffer(
        if (log->l_quotaoffs_flag & type)
                return;
 
-       xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+       xlog_recover_do_reg_buffer(item, bp, buf_f);
 }
 
 /*
@@ -2235,7 +2190,7 @@ xlog_recover_do_buffer_trans(
                  (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
                xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
        } else {
-               xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+               xlog_recover_do_reg_buffer(item, bp, buf_f);
        }
        if (error)
                return XFS_ERROR(error);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 95fff68..60b6b89 100644
@@ -966,7 +966,6 @@ xfs_trans_inode_alloc_buf(
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
        bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
-       bip->bli_format.blf_flags |= XFS_BLI_INODE_NEW_BUF;
 }
 
 
diff --git a/kernel/futex.c b/kernel/futex.c
index e8935b1..fcc94e7 100644
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
 void exit_robust_list(struct task_struct *curr)
 {
        struct robust_list_head __user *head = curr->robust_list;
-       struct robust_list __user *entry, *pending;
-       unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+       struct robust_list __user *entry, *next_entry, *pending;
+       unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
        unsigned long futex_offset;
+       int rc;
 
        /*
         * Fetch the list head (which was registered earlier, via
@@ -1965,11 +1966,13 @@ void exit_robust_list(struct task_struct *curr)
        if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
                return;
 
-       if (pending)
-               handle_futex_death((void __user *)pending + futex_offset,
-                                  curr, pip);
-
+       next_entry = NULL;      /* avoid warning with gcc */
        while (entry != &head->list) {
+               /*
+                * Fetch the next entry in the list before calling
+                * handle_futex_death:
+                */
+               rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
                /*
                 * A pending lock might already be on the list, so
                 * don't process it twice:
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr)
                        if (handle_futex_death((void __user *)entry + futex_offset,
                                                curr, pi))
                                return;
-               /*
-                * Fetch the next entry in the list:
-                */
-               if (fetch_robust_entry(&entry, &entry->next, &pi))
+               if (rc)
                        return;
+               entry = next_entry;
+               pi = next_pi;
                /*
                 * Avoid excessively long or circular lists:
                 */
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr)
 
                cond_resched();
        }
+
+       if (pending)
+               handle_futex_death((void __user *)pending + futex_offset,
+                                  curr, pip);
 }
 
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
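
[Editor's note] The exit_robust_list() reordering fetches the next robust-list entry before handle_futex_death() is called on the current one, since handling a dead lock can let another task unlink or reuse that entry, and the list_op_pending entry is now processed only after the walk. As a self-contained illustration of the "read next before acting on current" traversal pattern only — ordinary userspace C with made-up names, not this commit's code:

/* Sketch of the traversal pattern used above: save the next pointer
 * before processing a node whose processing may invalidate it. */
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int val;
};

static void handle(struct node *n)
{
        printf("handling %d\n", n->val);
        free(n);                         /* n->next must not be read after this */
}

int main(void)
{
        struct node *head = NULL;
        for (int i = 3; i > 0; i--) {
                struct node *n = malloc(sizeof(*n));
                if (!n)
                        return 1;
                n->val = i;
                n->next = head;
                head = n;
        }

        struct node *entry = head;
        int limit = 1024;                /* like ROBUST_LIST_LIMIT: bound the walk */
        while (entry && limit--) {
                struct node *next = entry->next;   /* fetch next first */
                handle(entry);                     /* may invalidate entry */
                entry = next;
        }
        return 0;
}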
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7e52eb0..2c2e295 100644
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
 void compat_exit_robust_list(struct task_struct *curr)
 {
        struct compat_robust_list_head __user *head = curr->compat_robust_list;
-       struct robust_list __user *entry, *pending;
-       unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-       compat_uptr_t uentry, upending;
+       struct robust_list __user *entry, *next_entry, *pending;
+       unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+       compat_uptr_t uentry, next_uentry, upending;
        compat_long_t futex_offset;
+       int rc;
 
        /*
         * Fetch the list head (which was registered earlier, via
@@ -61,10 +62,15 @@ void compat_exit_robust_list(struct task_struct *curr)
        if (fetch_robust_entry(&upending, &pending,
                               &head->list_op_pending, &pip))
                return;
-       if (pending)
-               handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
+       next_entry = NULL;      /* avoid warning with gcc */
        while (entry != (struct robust_list __user *) &head->list) {
+               /*
+                * Fetch the next entry in the list before calling
+                * handle_futex_death:
+                */
+               rc = fetch_robust_entry(&next_uentry, &next_entry,
+                       (compat_uptr_t __user *)&entry->next, &next_pi);
                /*
                 * A pending lock might already be on the list, so
                 * dont process it twice:
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
                                                curr, pi))
                                return;
 
-               /*
-                * Fetch the next entry in the list:
-                */
-               if (fetch_robust_entry(&uentry, &entry,
-                                      (compat_uptr_t __user *)&entry->next, &pi))
+               if (rc)
                        return;
+               uentry = next_uentry;
+               entry = next_entry;
+               pi = next_pi;
                /*
                 * Avoid excessively long or circular lists:
                 */
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
 
                cond_resched();
        }
+       if (pending)
+               handle_futex_death((void __user *)pending + futex_offset,
+                                  curr, pip);
 }
 
 asmlinkage long
diff --git a/kernel/sys.c b/kernel/sys.c
index 1b33b05..8ae2e63 100644
@@ -32,6 +32,7 @@
 #include <linux/getcpu.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/seccomp.h>
+#include <linux/cpu.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -878,6 +879,7 @@ void kernel_power_off(void)
        kernel_shutdown_prepare(SYSTEM_POWER_OFF);
        if (pm_power_off_prepare)
                pm_power_off_prepare();
+       disable_nonboot_cpus();
        sysdev_shutdown();
        printk(KERN_EMERG "Power down.\n");
        machine_power_off();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 84c795e..eab8c42 100644
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
        might_sleep();
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
                cond_resched();
-               clear_user_highpage(page + i, addr);
+               clear_user_highpage(page + i, addr + i * PAGE_SIZE);
        }
 }
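
[Editor's note] The hugetlb fix passes each constituent base page its own user virtual address, addr + i * PAGE_SIZE, instead of the huge page's base address for every subpage; on architectures where clear_user_highpage() uses the user address (for example to handle cache aliasing), the old call handed it the wrong address for all but the first subpage. A plain userspace analogue of the corrected per-subpage arithmetic — the sizes below are assumptions for illustration, not kernel values from this commit:

/* Userspace analogue of the corrected loop: each base page inside the
 * "huge" region is cleared using its own address, addr + i * PAGE_SIZE. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE  4096UL
#define HPAGE_SIZE (2UL * 1024 * 1024)   /* pretend 2 MiB huge pages */

static void clear_one_page(void *addr)
{
        memset(addr, 0, PAGE_SIZE);
}

int main(void)
{
        char *addr = malloc(HPAGE_SIZE);
        if (!addr)
                return 1;

        for (unsigned long i = 0; i < HPAGE_SIZE / PAGE_SIZE; i++)
                clear_one_page(addr + i * PAGE_SIZE);   /* not just "addr" */

        printf("cleared %lu base pages\n", HPAGE_SIZE / PAGE_SIZE);
        free(addr);
        return 0;
}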