Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
author		Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 7 Feb 2014 20:19:06 +0000 (12:19 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 7 Feb 2014 20:19:06 +0000 (12:19 -0800)
Pull MIPS updates from Ralf Baechle:
 "hree minor patches.  All have sat in -next for a few days"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: fpu.h: Fix build when CONFIG_BUG is not set
  MIPS: Wire up sched_setattr/sched_getattr syscalls
  MIPS: Alchemy: Fix DB1100 GPIO registration

119 files changed:
Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt [new file with mode: 0644]
Documentation/dvb/contributors.txt
Documentation/kernel-parameters.txt
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/entry.S
arch/parisc/hpux/fs.c
arch/s390/crypto/aes_s390.c
arch/s390/crypto/des_s390.c
arch/x86/Kconfig.debug
arch/x86/include/asm/xen/page.h
arch/x86/mm/numa.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
drivers/acpi/battery.c
drivers/acpi/proc.c
drivers/acpi/scan.c
drivers/acpi/utils.c
drivers/acpi/video_detect.c
drivers/block/nvme-core.c
drivers/block/nvme-scsi.c
drivers/block/xen-blkback/blkback.c
drivers/cpufreq/intel_pstate.c
drivers/gpu/drm/ast/ast_fb.c
drivers/gpu/drm/cirrus/cirrus_fbdev.c
drivers/gpu/drm/mgag200/mgag200_fb.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/reg_srcs/r600
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/svga3d_reg.h
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/hwmon/da9055-hwmon.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/irqchip/Makefile
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-zevio.c [new file with mode: 0644]
drivers/media/dvb-frontends/cx24117.c
drivers/media/dvb-frontends/nxt200x.c
drivers/media/i2c/adv7842.c
drivers/media/i2c/s5k5baf.c
drivers/media/pci/bt8xx/bttv-cards.c
drivers/media/pci/bt8xx/bttv-gpio.c
drivers/media/pci/saa7134/saa7134-cards.c
drivers/media/platform/exynos4-is/fimc-core.c
drivers/media/platform/exynos4-is/fimc-lite.c
drivers/media/platform/s5p-jpeg/jpeg-core.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
drivers/media/usb/dvb-usb-v2/mxl111sf.c
drivers/media/usb/dvb-usb-v2/mxl111sf.h
drivers/media/usb/hdpvr/hdpvr-core.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/media/v4l2-core/videobuf-dma-contig.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/media/v4l2-core/videobuf-vmalloc.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/regulator/ab3100.c
drivers/regulator/core.c
drivers/regulator/s2mps11.c
drivers/staging/media/go7007/go7007-loader.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
fs/btrfs/check-integrity.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/buffer.c
fs/exec.c
fs/namei.c
fs/nfs/nfs3acl.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/ocfs2/alloc.c
fs/ocfs2/localalloc.c
fs/ocfs2/localalloc.h
fs/posix_acl.c
include/linux/binfmts.h
include/linux/fs.h
include/linux/nfs_xdr.h
include/linux/nvme.h
include/linux/page-flags.h
include/linux/sched.h
include/uapi/linux/nvme.h
include/xen/grant_table.h
init/main.c
kernel/auditsc.c
kernel/kmod.c
lib/Kconfig.debug
mm/page-writeback.c
mm/swap_state.c
mm/swapfile.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_realtek.c
sound/usb/Kconfig

diff --git a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
new file mode 100644
index 0000000..aee38e7
--- /dev/null
@@ -0,0 +1,18 @@
+TI-NSPIRE interrupt controller
+
+Required properties:
+- compatible: Compatible property value should be "lsi,zevio-intc".
+
+- reg: Physical base address of the controller and length of memory mapped
+       region.
+
+- interrupt-controller : Identifies the node as an interrupt controller
+
+Example:
+
+interrupt-controller {
+       compatible = "lsi,zevio-intc";
+       interrupt-controller;
+       reg = <0xDC000000 0x1000>;
+       #interrupt-cells = <1>;
+};
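
For orientation, a minimal sketch of the kind of init callback that consumes a binding like the one above (hypothetical names; the actual consumer is drivers/irqchip/irq-zevio.c, added by this merge):

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/irqdomain.h>

#define ZEVIO_NR_IRQS	32	/* assumption for the sketch */

static int __init zevio_intc_of_init(struct device_node *node,
				     struct device_node *parent)
{
	void __iomem *base = of_iomap(node, 0);	/* maps the "reg" range */
	struct irq_domain *domain;

	if (!base)
		return -ENOMEM;

	/* #interrupt-cells = <1>: hwirq numbers map linearly */
	domain = irq_domain_add_linear(node, ZEVIO_NR_IRQS,
				       &irq_domain_simple_ops, base);
	return domain ? 0 : -ENOMEM;
}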
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 47c3009..731a009 100644
@@ -78,7 +78,7 @@ Peter Beutner <p.beutner@gmx.net>
 Wilson Michaels <wilsonmichaels@earthlink.net>
   for the lgdt330x frontend driver, and various bugfixes
 
-Michael Krufky <mkrufky@m1k.net>
+Michael Krufky <mkrufky@linuxtv.org>
   for maintaining v4l/dvb inter-tree dependencies
 
 Taylor Jacob <rtjacob@earthlink.net>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8f441da..7116fda 100644
@@ -1726,16 +1726,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        option description.
 
        memmap=nn[KMG]@ss[KMG]
-                       [KNL] Force usage of a specific region of memory
-                       Region of memory to be used, from ss to ss+nn.
+                       [KNL] Force usage of a specific region of memory.
+                       Region of memory to be used is from ss to ss+nn.
 
        memmap=nn[KMG]#ss[KMG]
                        [KNL,ACPI] Mark specific memory as ACPI data.
-                       Region of memory to be used, from ss to ss+nn.
+                       Region of memory to be marked is from ss to ss+nn.
 
        memmap=nn[KMG]$ss[KMG]
                        [KNL,ACPI] Mark specific memory as reserved.
-                       Region of memory to be used, from ss to ss+nn.
+                       Region of memory to be reserved is from ss to ss+nn.
                        Example: Exclude memory from 0x18690000-0x1869ffff
                                 memmap=64K$0x18690000
                                 or
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index afd45e0..ae763d8 100644
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    312 /* length of syscall table */
+#define NR_syscalls                    314 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 34fd6fe..715e85f 100644
 #define __NR_process_vm_writev         1333
 #define __NR_accept4                   1334
 #define __NR_finit_module              1335
+#define __NR_sched_setattr             1336
+#define __NR_sched_getattr             1337
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ddea607..fa8d61a 100644
@@ -1773,6 +1773,8 @@ sys_call_table:
        data8 sys_process_vm_writev
        data8 sys_accept4
        data8 sys_finit_module                  // 1335
+       data8 sys_sched_setattr
+       data8 sys_sched_getattr
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
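
Together, these three hunks are the whole wiring job on ia64: bump the table length, assign the numbers, append the table entries in matching order. A userspace sketch of exercising the result by raw syscall number (hedged: 1336/1337 are the ia64 numbers from the hunks above, and v3.14 shipped no glibc wrapper or uapi struct, so struct sched_attr is declared by hand):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr {			/* layout per the v3.14 scheduler */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;	/* SCHED_FIFO / SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE fields */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = { .size = sizeof(attr) };

	/* __NR_sched_getattr == 1337 on ia64, per the hunk above */
	if (syscall(1337, 0, &attr, sizeof(attr), 0) == 0)
		printf("policy=%u nice=%d\n",
		       attr.sched_policy, attr.sched_nice);
	return 0;
}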
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 88d0962..2bedafe 100644
 
 int hpux_execve(struct pt_regs *regs)
 {
-       int error;
-       struct filename *filename;
-
-       filename = getname((const char __user *) regs->gr[26]);
-       error = PTR_ERR(filename);
-       if (IS_ERR(filename))
-               goto out;
-
-       error = do_execve(filename->name,
+       return  do_execve(getname((const char __user *) regs->gr[26]),
                          (const char __user *const __user *) regs->gr[25],
                          (const char __user *const __user *) regs->gr[24]);
-
-       putname(filename);
-
-out:
-       return error;
 }
 
 struct hpux_dirent {
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b3feabd..cf3c008 100644
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/spinlock.h>
 #include "crypt_s390.h"
 
 #define AES_KEYLEN_128         1
@@ -32,6 +33,7 @@
 #define AES_KEYLEN_256         4
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 static char keylen_flag;
 
 struct s390_aes_ctx {
@@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
        return aes_set_key(tfm, in_key, key_len);
 }
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+       unsigned int i, n;
+
+       /* only use complete blocks, max. PAGE_SIZE */
+       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+       for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+               memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+                      AES_BLOCK_SIZE);
+               crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+       }
+       return n;
+}
+
 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
 {
        int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
-       unsigned int i, n, nbytes;
-       u8 buf[AES_BLOCK_SIZE];
-       u8 *out, *in;
+       unsigned int n, nbytes;
+       u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+       u8 *out, *in, *ctrptr = ctrbuf;
 
        if (!walk->nbytes)
                return ret;
 
-       memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+       if (spin_trylock(&ctrblk_lock))
+               ctrptr = ctrblk;
+
+       memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                while (nbytes >= AES_BLOCK_SIZE) {
-                       /* only use complete blocks, max. PAGE_SIZE */
-                       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-                                                nbytes & ~(AES_BLOCK_SIZE - 1);
-                       for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-                               memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
-                                      AES_BLOCK_SIZE);
-                               crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
-                       }
-                       ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-                       if (ret < 0 || ret != n)
+                       if (ctrptr == ctrblk)
+                               n = __ctrblk_init(ctrptr, nbytes);
+                       else
+                               n = AES_BLOCK_SIZE;
+                       ret = crypt_s390_kmctr(func, sctx->key, out, in,
+                                              n, ctrptr);
+                       if (ret < 0 || ret != n) {
+                               if (ctrptr == ctrblk)
+                                       spin_unlock(&ctrblk_lock);
                                return -EIO;
+                       }
                        if (n > AES_BLOCK_SIZE)
-                               memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+                               memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
                                       AES_BLOCK_SIZE);
-                       crypto_inc(ctrblk, AES_BLOCK_SIZE);
+                       crypto_inc(ctrptr, AES_BLOCK_SIZE);
                        out += n;
                        in += n;
                        nbytes -= n;
                }
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }
+       if (ctrptr == ctrblk) {
+               if (nbytes)
+                       memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+               else
+                       memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+               spin_unlock(&ctrblk_lock);
+       }
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
@@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-                                      AES_BLOCK_SIZE, ctrblk);
+                                      AES_BLOCK_SIZE, ctrbuf);
                if (ret < 0 || ret != AES_BLOCK_SIZE)
                        return -EIO;
                memcpy(out, buf, nbytes);
-               crypto_inc(ctrblk, AES_BLOCK_SIZE);
+               crypto_inc(ctrbuf, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
+               memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
        }
-       memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
        return ret;
 }
 
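The factored-out __ctrblk_init() fills the shared page with consecutive counter values so that a single kmctr invocation can cover up to PAGE_SIZE of data. Stripped of the s390 specifics, the copy-then-increment scheme looks like this (a standalone sketch; the kernel uses crypto_inc(), a big-endian increment over the whole block):

#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 16	/* AES_BLOCK_SIZE */

/* big-endian increment across the block, as crypto_inc() does */
static void block_inc(uint8_t *b, unsigned int size)
{
	while (size && ++b[--size] == 0)
		;	/* carry into the next more-significant byte */
}

/* buf[0..15] already holds the current counter; replicate and bump */
static unsigned int ctrblk_init(uint8_t *buf, unsigned int nbytes,
				unsigned int limit)
{
	unsigned int i, n;

	/* whole blocks only, capped at the buffer size (PAGE_SIZE above) */
	n = (nbytes > limit) ? limit : nbytes & ~(BLOCK_SIZE - 1);
	for (i = BLOCK_SIZE; i < n; i += BLOCK_SIZE) {
		memcpy(buf + i, buf + i - BLOCK_SIZE, BLOCK_SIZE);
		block_inc(buf + i, BLOCK_SIZE);
	}
	return n;	/* bytes' worth of ready-to-use counters */
}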
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 200f2a1..0a5aac8 100644
@@ -25,6 +25,7 @@
 #define DES3_KEY_SIZE  (3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 
 struct s390_des_ctx {
        u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 }
 
 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
-                           u8 *iv, struct blkcipher_walk *walk)
+                           struct blkcipher_walk *walk)
 {
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
+       struct {
+               u8 iv[DES_BLOCK_SIZE];
+               u8 key[DES3_KEY_SIZE];
+       } param;
 
        if (!nbytes)
                goto out;
 
-       memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+       memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+       memcpy(param.key, ctx->key, DES3_KEY_SIZE);
        do {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;
 
-               ret = crypt_s390_kmc(func, iv, out, in, n);
+               ret = crypt_s390_kmc(func, &param, out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;
 
                nbytes &= DES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
-       memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+       memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
 
 out:
        return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
 {
-       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
 {
-       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
                            struct scatterlist *dst, struct scatterlist *src,
                            unsigned int nbytes)
 {
-       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
 }
 
 static int cbc_des3_decrypt(struct blkcipher_desc *desc,
                            struct scatterlist *dst, struct scatterlist *src,
                            unsigned int nbytes)
 {
-       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
        }
 };
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+       unsigned int i, n;
+
+       /* align to block size, max. PAGE_SIZE */
+       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+       for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+               memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+               crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+       }
+       return n;
+}
+
 static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
-                           struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+                           struct s390_des_ctx *ctx,
+                           struct blkcipher_walk *walk)
 {
        int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
-       unsigned int i, n, nbytes;
-       u8 buf[DES_BLOCK_SIZE];
-       u8 *out, *in;
+       unsigned int n, nbytes;
+       u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+       u8 *out, *in, *ctrptr = ctrbuf;
+
+       if (!walk->nbytes)
+               return ret;
 
-       memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+       if (spin_trylock(&ctrblk_lock))
+               ctrptr = ctrblk;
+
+       memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                while (nbytes >= DES_BLOCK_SIZE) {
-                       /* align to block size, max. PAGE_SIZE */
-                       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-                               nbytes & ~(DES_BLOCK_SIZE - 1);
-                       for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
-                               memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
-                                      DES_BLOCK_SIZE);
-                               crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
-                       }
-                       ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-                       if (ret < 0 || ret != n)
+                       if (ctrptr == ctrblk)
+                               n = __ctrblk_init(ctrptr, nbytes);
+                       else
+                               n = DES_BLOCK_SIZE;
+                       ret = crypt_s390_kmctr(func, ctx->key, out, in,
+                                              n, ctrptr);
+                       if (ret < 0 || ret != n) {
+                               if (ctrptr == ctrblk)
+                                       spin_unlock(&ctrblk_lock);
                                return -EIO;
+                       }
                        if (n > DES_BLOCK_SIZE)
-                               memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+                               memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
                                       DES_BLOCK_SIZE);
-                       crypto_inc(ctrblk, DES_BLOCK_SIZE);
+                       crypto_inc(ctrptr, DES_BLOCK_SIZE);
                        out += n;
                        in += n;
                        nbytes -= n;
                }
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }
-
+       if (ctrptr == ctrblk) {
+               if (nbytes)
+                       memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+               else
+                       memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+               spin_unlock(&ctrblk_lock);
+       }
        /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
        if (nbytes) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                ret = crypt_s390_kmctr(func, ctx->key, buf, in,
-                                      DES_BLOCK_SIZE, ctrblk);
+                                      DES_BLOCK_SIZE, ctrbuf);
                if (ret < 0 || ret != DES_BLOCK_SIZE)
                        return -EIO;
                memcpy(out, buf, nbytes);
-               crypto_inc(ctrblk, DES_BLOCK_SIZE);
+               crypto_inc(ctrbuf, DES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
+               memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
        }
-       memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
        return ret;
 }
 
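Both CTR conversions in this pair of files share one concurrency idiom: opportunistically take the preallocated ctrblk page for multi-block batches, and fall back to a single on-stack counter when the lock is contended, so no caller ever sleeps or spins on it. The idiom in isolation (a sketch with hypothetical names):

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static u8 *shared_page;			/* preallocated, PAGE_SIZE */
static DEFINE_SPINLOCK(shared_lock);

static void ctr_crypt_sketch(const u8 *iv, unsigned int blksize)
{
	u8 local[16];
	u8 *ctr = local;		/* slow path: one block at a time */

	if (spin_trylock(&shared_lock))
		ctr = shared_page;	/* fast path: batch a whole page */

	memcpy(ctr, iv, blksize);
	/* ... walk the data; batch only when ctr == shared_page ... */

	if (ctr == shared_page)
		spin_unlock(&shared_lock);
}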
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0f3621e..321a52c 100644
@@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT
 config X86_DECODER_SELFTEST
        bool "x86 instruction decoder selftest"
        depends on DEBUG_KERNEL && KPROBES
+       depends on !COMPILE_TEST
        ---help---
         Perform x86 instruction decoder selftests at build time.
         This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 787e1bb..3e276eb 100644
@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 extern int m2p_add_override(unsigned long mfn, struct page *page,
                            struct gnttab_map_grant_ref *kmap_op);
 extern int m2p_remove_override(struct page *page,
-                              struct gnttab_map_grant_ref *kmap_op,
-                              unsigned long mfn);
+                               struct gnttab_map_grant_ref *kmap_op);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
@@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
                pfn = m2p_find_override_pfn(mfn, ~0);
        }
 
-       /*
+       /* 
         * pfn is ~0 if there are no entries in the m2p for mfn or if the
         * entry doesn't map back to the mfn and m2p_override doesn't have a
         * valid entry for it.
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 81b2750..27aa045 100644
@@ -493,14 +493,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
                struct numa_memblk *mb = &mi->blk[i];
                memblock_set_node(mb->start, mb->end - mb->start,
                                  &memblock.memory, mb->nid);
-
-               /*
-                * At this time, all memory regions reserved by memblock are
-                * used by the kernel. Set the nid in memblock.reserved will
-                * mark out all the nodes the kernel resides in.
-                */
-               memblock_set_node(mb->start, mb->end - mb->start,
-                                 &memblock.reserved, mb->nid);
        }
 
        /*
@@ -565,10 +557,21 @@ static void __init numa_init_array(void)
 static void __init numa_clear_kernel_node_hotplug(void)
 {
        int i, nid;
-       nodemask_t numa_kernel_nodes;
+       nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
        unsigned long start, end;
        struct memblock_type *type = &memblock.reserved;
 
+       /*
+        * At this time, all memory regions reserved by memblock are
+        * used by the kernel. Set the nid in memblock.reserved will
+        * mark out all the nodes the kernel resides in.
+        */
+       for (i = 0; i < numa_meminfo.nr_blks; i++) {
+               struct numa_memblk *mb = &numa_meminfo.blk[i];
+               memblock_set_node(mb->start, mb->end - mb->start,
+                                 &memblock.reserved, mb->nid);
+       }
+
        /* Mark all kernel nodes. */
        for (i = 0; i < type->cnt; i++)
                node_set(type->regions[i].nid, numa_kernel_nodes);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a4d7b64..201d09a 100644
@@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu)
         * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
         * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
        write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
+
+       if (!cpu)
+               return;
+       /*
+        * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
+        * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
+       */
+       if (cpu_has_pse)
+               set_in_cr4(X86_CR4_PSE);
+
+       if (cpu_has_pge)
+               set_in_cr4(X86_CR4_PGE);
 }
 
 /*
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8009acb..696c694 100644
@@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page,
                                        "m2p_add_override: pfn %lx not mapped", pfn))
                        return -EINVAL;
        }
+       WARN_ON(PagePrivate(page));
+       SetPagePrivate(page);
+       set_page_private(page, mfn);
+       page->index = pfn_to_mfn(pfn);
+
+       if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
+               return -ENOMEM;
 
        if (kmap_op != NULL) {
                if (!PageHighMem(page)) {
@@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
 int m2p_remove_override(struct page *page,
-                       struct gnttab_map_grant_ref *kmap_op,
-                       unsigned long mfn)
+               struct gnttab_map_grant_ref *kmap_op)
 {
        unsigned long flags;
+       unsigned long mfn;
        unsigned long pfn;
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
 
        pfn = page_to_pfn(page);
+       mfn = get_phys_to_machine(pfn);
+       if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
+               return -EINVAL;
 
        if (!PageHighMem(page)) {
                address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page,
        spin_lock_irqsave(&m2p_override_lock, flags);
        list_del(&page->lru);
        spin_unlock_irqrestore(&m2p_override_lock, flags);
+       WARN_ON(!PagePrivate(page));
+       ClearPagePrivate(page);
 
+       set_phys_to_machine(pfn, page->index);
        if (kmap_op != NULL) {
                if (!PageHighMem(page)) {
                        struct multicall_space mcs;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 470e754..018a428 100644
@@ -549,7 +549,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
 {
        unsigned long x;
        struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
-       if (sscanf(buf, "%ld\n", &x) == 1)
+       if (sscanf(buf, "%lu\n", &x) == 1)
                battery->alarm = x/1000;
        if (acpi_battery_present(battery))
                acpi_battery_set_alarm(battery);
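
A one-character fix with a real bug behind it: `x` is an unsigned long, and "%ld" tells sscanf to store through a pointer to signed long, which mismatches the object's type (formally undefined behaviour). A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long x;

	/* "%lu" matches unsigned long; "%ld" would promise long * */
	if (sscanf("12000\n", "%lu\n", &x) == 1)
		printf("alarm = %lu\n", x / 1000);	/* as in the hunk */
	return 0;
}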
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 50fe34f..75c28ea 100644
@@ -60,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
                                seq_printf(seq, "%c%-8s  %s:%s\n",
                                        dev->wakeup.flags.run_wake ? '*' : ' ',
                                        (device_may_wakeup(&dev->dev) ||
-                                       (ldev && device_may_wakeup(ldev))) ?
+                                       device_may_wakeup(ldev)) ?
                                        "enabled" : "disabled",
                                        ldev->bus ? ldev->bus->name :
                                        "no-bus", dev_name(ldev));
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7384158..57b053f 100644
@@ -484,7 +484,6 @@ static void acpi_device_hotplug(void *data, u32 src)
 static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
 {
        u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-       struct acpi_scan_handler *handler = data;
        struct acpi_device *adev;
        acpi_status status;
 
@@ -500,7 +499,10 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
                break;
        case ACPI_NOTIFY_EJECT_REQUEST:
                acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
-               if (!handler->hotplug.enabled) {
+               if (!adev->handler)
+                       goto err_out;
+
+               if (!adev->handler->hotplug.enabled) {
                        acpi_handle_err(handle, "Eject disabled\n");
                        ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
                        goto err_out;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0347a37..85e3b61 100644
@@ -99,10 +99,6 @@ acpi_extract_package(union acpi_object *package,
 
                union acpi_object *element = &(package->package.elements[i]);
 
-               if (!element) {
-                       return AE_BAD_DATA;
-               }
-
                switch (element->type) {
 
                case ACPI_TYPE_INTEGER:
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f0447d3..a697b77 100644
@@ -170,6 +170,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
        },
        {
        .callback = video_detect_force_vendor,
+       .ident = "HP EliteBook Revolve 810",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
+               },
+       },
+       {
+       .callback = video_detect_force_vendor,
        .ident = "Lenovo Yoga 13",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 1f14ac4..51824d1 100644
@@ -46,7 +46,6 @@
 #define NVME_Q_DEPTH 1024
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
-#define NVME_MINORS 64
 #define ADMIN_TIMEOUT  (60 * HZ)
 
 static int nvme_major;
@@ -58,6 +57,17 @@ module_param(use_threaded_interrupts, int, 0);
 static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
+static struct workqueue_struct *nvme_workq;
+
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
+struct async_cmd_info {
+       struct kthread_work work;
+       struct kthread_worker *worker;
+       u32 result;
+       int status;
+       void *ctx;
+};
 
 /*
  * An NVM Express queue.  Each device has at least two (one for admin
@@ -66,6 +76,7 @@ static struct task_struct *nvme_thread;
 struct nvme_queue {
        struct device *q_dmadev;
        struct nvme_dev *dev;
+       char irqname[24];       /* nvme4294967295-65535\0 */
        spinlock_t q_lock;
        struct nvme_command *sq_cmds;
        volatile struct nvme_completion *cqes;
@@ -80,9 +91,11 @@ struct nvme_queue {
        u16 sq_head;
        u16 sq_tail;
        u16 cq_head;
+       u16 qid;
        u8 cq_phase;
        u8 cqe_seen;
        u8 q_suspended;
+       struct async_cmd_info cmdinfo;
        unsigned long cmdid_data[];
 };
 
@@ -97,6 +110,7 @@ static inline void _nvme_check_size(void)
        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -111,6 +125,7 @@ struct nvme_cmd_info {
        nvme_completion_fn fn;
        void *ctx;
        unsigned long timeout;
+       int aborted;
 };
 
 static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@ -154,6 +169,7 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
        info[cmdid].fn = handler;
        info[cmdid].ctx = ctx;
        info[cmdid].timeout = jiffies + timeout;
+       info[cmdid].aborted = 0;
        return cmdid;
 }
 
@@ -172,6 +188,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 #define CMD_CTX_COMPLETED      (0x310 + CMD_CTX_BASE)
 #define CMD_CTX_INVALID                (0x314 + CMD_CTX_BASE)
 #define CMD_CTX_FLUSH          (0x318 + CMD_CTX_BASE)
+#define CMD_CTX_ABORT          (0x31C + CMD_CTX_BASE)
 
 static void special_completion(struct nvme_dev *dev, void *ctx,
                                                struct nvme_completion *cqe)
@@ -180,6 +197,10 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
                return;
        if (ctx == CMD_CTX_FLUSH)
                return;
+       if (ctx == CMD_CTX_ABORT) {
+               ++dev->abort_limit;
+               return;
+       }
        if (ctx == CMD_CTX_COMPLETED) {
                dev_warn(&dev->pci_dev->dev,
                                "completed id %d twice on queue %d\n",
@@ -196,6 +217,15 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
        dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
 }
 
+static void async_completion(struct nvme_dev *dev, void *ctx,
+                                               struct nvme_completion *cqe)
+{
+       struct async_cmd_info *cmdinfo = ctx;
+       cmdinfo->result = le32_to_cpup(&cqe->result);
+       cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+       queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+}
+
 /*
  * Called with local interrupts disabled and the q_lock held.  May not sleep.
  */
@@ -693,7 +723,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return 0;
 
-       writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+       writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;
 
@@ -804,12 +834,34 @@ int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
        return cmdinfo.status;
 }
 
+static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+                       struct nvme_command *cmd,
+                       struct async_cmd_info *cmdinfo, unsigned timeout)
+{
+       int cmdid;
+
+       cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
+       if (cmdid < 0)
+               return cmdid;
+       cmdinfo->status = -EINTR;
+       cmd->common.command_id = cmdid;
+       nvme_submit_cmd(nvmeq, cmd);
+       return 0;
+}
+
 int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
                                                                u32 *result)
 {
        return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
 }
 
+static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
+               struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+{
+       return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+                                                               ADMIN_TIMEOUT);
+}
+
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 {
        int status;
@@ -919,6 +971,56 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
        return nvme_submit_admin_cmd(dev, &c, result);
 }
 
+/**
+ * nvme_abort_cmd - Attempt aborting a command
+ * @cmdid: Command id of a timed out IO
+ * @queue: The queue with timed out IO
+ *
+ * Schedule controller reset if the command was already aborted once before and
+ * still hasn't been returned to the driver, or if this is the admin queue.
+ */
+static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+{
+       int a_cmdid;
+       struct nvme_command cmd;
+       struct nvme_dev *dev = nvmeq->dev;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+       if (!nvmeq->qid || info[cmdid].aborted) {
+               if (work_busy(&dev->reset_work))
+                       return;
+               list_del_init(&dev->node);
+               dev_warn(&dev->pci_dev->dev,
+                       "I/O %d QID %d timeout, reset controller\n", cmdid,
+                                                               nvmeq->qid);
+               PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+               queue_work(nvme_workq, &dev->reset_work);
+               return;
+       }
+
+       if (!dev->abort_limit)
+               return;
+
+       a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+                                                               ADMIN_TIMEOUT);
+       if (a_cmdid < 0)
+               return;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.abort.opcode = nvme_admin_abort_cmd;
+       cmd.abort.cid = cmdid;
+       cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+       cmd.abort.command_id = a_cmdid;
+
+       --dev->abort_limit;
+       info[cmdid].aborted = 1;
+       info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+
+       dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+                                                       nvmeq->qid);
+       nvme_submit_cmd(dev->queues[0], &cmd);
+}
+
 /**
  * nvme_cancel_ios - Cancel outstanding I/Os
  * @queue: The queue to cancel I/Os on
@@ -942,7 +1044,12 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
                        continue;
                if (info[cmdid].ctx == CMD_CTX_CANCELLED)
                        continue;
-               dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+               if (timeout && nvmeq->dev->initialized) {
+                       nvme_abort_cmd(cmdid, nvmeq);
+                       continue;
+               }
+               dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
+                                                               nvmeq->qid);
                ctx = cancel_cmdid(nvmeq, cmdid, &fn);
                fn(nvmeq->dev, ctx, &cqe);
        }
@@ -964,26 +1071,31 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
        kfree(nvmeq);
 }
 
-static void nvme_free_queues(struct nvme_dev *dev)
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 {
        int i;
 
-       for (i = dev->queue_count - 1; i >= 0; i--) {
+       for (i = dev->queue_count - 1; i >= lowest; i--) {
                nvme_free_queue(dev->queues[i]);
                dev->queue_count--;
                dev->queues[i] = NULL;
        }
 }
 
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+/**
+ * nvme_suspend_queue - put queue into suspended state
+ * @nvmeq - queue to suspend
+ *
+ * Returns 1 if already suspended, 0 otherwise.
+ */
+static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-       struct nvme_queue *nvmeq = dev->queues[qid];
-       int vector = dev->entry[nvmeq->cq_vector].vector;
+       int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
 
        spin_lock_irq(&nvmeq->q_lock);
        if (nvmeq->q_suspended) {
                spin_unlock_irq(&nvmeq->q_lock);
-               return;
+               return 1;
        }
        nvmeq->q_suspended = 1;
        spin_unlock_irq(&nvmeq->q_lock);
@@ -991,18 +1103,35 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
        irq_set_affinity_hint(vector, NULL);
        free_irq(vector, nvmeq);
 
-       /* Don't tell the adapter to delete the admin queue */
-       if (qid) {
-               adapter_delete_sq(dev, qid);
-               adapter_delete_cq(dev, qid);
-       }
+       return 0;
+}
 
+static void nvme_clear_queue(struct nvme_queue *nvmeq)
+{
        spin_lock_irq(&nvmeq->q_lock);
        nvme_process_cq(nvmeq);
        nvme_cancel_ios(nvmeq, false);
        spin_unlock_irq(&nvmeq->q_lock);
 }
 
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+{
+       struct nvme_queue *nvmeq = dev->queues[qid];
+
+       if (!nvmeq)
+               return;
+       if (nvme_suspend_queue(nvmeq))
+               return;
+
+       /* Don't tell the adapter to delete the admin queue.
+        * Don't tell a removed adapter to delete IO queues. */
+       if (qid && readl(&dev->bar->csts) != -1) {
+               adapter_delete_sq(dev, qid);
+               adapter_delete_cq(dev, qid);
+       }
+       nvme_clear_queue(nvmeq);
+}
+
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
                                                        int depth, int vector)
 {
@@ -1025,15 +1154,18 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 
        nvmeq->q_dmadev = dmadev;
        nvmeq->dev = dev;
+       snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
+                       dev->instance, qid);
        spin_lock_init(&nvmeq->q_lock);
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        init_waitqueue_head(&nvmeq->sq_full);
        init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
        bio_list_init(&nvmeq->sq_cong);
-       nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+       nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
        nvmeq->cq_vector = vector;
+       nvmeq->qid = qid;
        nvmeq->q_suspended = 1;
        dev->queue_count++;
 
@@ -1052,11 +1184,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 {
        if (use_threaded_interrupts)
                return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
-                                       nvme_irq_check, nvme_irq,
-                                       IRQF_DISABLED | IRQF_SHARED,
+                                       nvme_irq_check, nvme_irq, IRQF_SHARED,
                                        name, nvmeq);
        return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
-                               IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+                               IRQF_SHARED, name, nvmeq);
 }
 
 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1067,7 +1198,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
        nvmeq->sq_tail = 0;
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
-       nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+       nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        memset(nvmeq->cmdid_data, 0, extra);
        memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
        nvme_cancel_ios(nvmeq, false);
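
The recurring `qid << (db_stride + 1)` to `qid * 2 * db_stride` rewrites pair with the nvme_dev_map() change later in this diff, where db_stride becomes `1 << NVME_CAP_STRIDE(cap)` — a count of 32-bit doorbell slots rather than the raw shift — so every doorbell offset is a plain multiplication. The resulting arithmetic, spelled out as a sketch:

#include <linux/types.h>
#include <linux/io.h>

/* db_stride == 1 << CAP.DSTRD, in units of 32-bit doorbell registers */
static inline u32 __iomem *sq_tail_db(u32 __iomem *dbs, u16 qid,
				      u32 db_stride)
{
	return &dbs[(2 * qid) * db_stride];	/* submission queue tail */
}

static inline u32 __iomem *cq_head_db(u32 __iomem *dbs, u16 qid,
				      u32 db_stride)
{
	return &dbs[(2 * qid + 1) * db_stride];	/* completion queue head */
}

/* hence db_bar_size(): a 4096-byte register block plus, per queue pair,
 * two doorbells of four bytes each: 4096 + (n + 1) * 8 * db_stride */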
@@ -1087,13 +1218,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        if (result < 0)
                goto release_cq;
 
-       result = queue_request_irq(dev, nvmeq, "nvme");
+       result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
        if (result < 0)
                goto release_sq;
 
-       spin_lock(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->q_lock);
        nvme_init_queue(nvmeq, qid);
-       spin_unlock(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->q_lock);
 
        return result;
 
@@ -1205,13 +1336,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        if (result)
                return result;
 
-       result = queue_request_irq(dev, nvmeq, "nvme admin");
+       result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
        if (result)
                return result;
 
-       spin_lock(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->q_lock);
        nvme_init_queue(nvmeq, 0);
-       spin_unlock(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->q_lock);
        return result;
 }
 
@@ -1487,10 +1618,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
        }
 }
 
+#ifdef CONFIG_COMPAT
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+                                       unsigned int cmd, unsigned long arg)
+{
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+       switch (cmd) {
+       case SG_IO:
+               return nvme_sg_io32(ns, arg);
+       }
+       return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl      NULL
+#endif
+
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+       struct nvme_dev *dev = ns->dev;
+
+       kref_get(&dev->kref);
+       return 0;
+}
+
+static void nvme_free_dev(struct kref *kref);
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+       struct nvme_ns *ns = disk->private_data;
+       struct nvme_dev *dev = ns->dev;
+
+       kref_put(&dev->kref, nvme_free_dev);
+}
+
 static const struct block_device_operations nvme_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = nvme_ioctl,
-       .compat_ioctl   = nvme_ioctl,
+       .compat_ioctl   = nvme_compat_ioctl,
+       .open           = nvme_open,
+       .release        = nvme_release,
 };
 
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
@@ -1514,13 +1682,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 
 static int nvme_kthread(void *data)
 {
-       struct nvme_dev *dev;
+       struct nvme_dev *dev, *next;
 
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock(&dev_list_lock);
-               list_for_each_entry(dev, &dev_list, node) {
+               list_for_each_entry_safe(dev, next, &dev_list, node) {
                        int i;
+                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+                                                       dev->initialized) {
+                               if (work_busy(&dev->reset_work))
+                                       continue;
+                               list_del_init(&dev->node);
+                               dev_warn(&dev->pci_dev->dev,
+                                       "Failed status, reset controller\n");
+                               PREPARE_WORK(&dev->reset_work,
+                                                       nvme_reset_failed_dev);
+                               queue_work(nvme_workq, &dev->reset_work);
+                               continue;
+                       }
                        for (i = 0; i < dev->queue_count; i++) {
                                struct nvme_queue *nvmeq = dev->queues[i];
                                if (!nvmeq)
@@ -1541,33 +1721,6 @@ static int nvme_kthread(void *data)
        return 0;
 }
 
-static DEFINE_IDA(nvme_index_ida);
-
-static int nvme_get_ns_idx(void)
-{
-       int index, error;
-
-       do {
-               if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
-                       return -1;
-
-               spin_lock(&dev_list_lock);
-               error = ida_get_new(&nvme_index_ida, &index);
-               spin_unlock(&dev_list_lock);
-       } while (error == -EAGAIN);
-
-       if (error)
-               index = -1;
-       return index;
-}
-
-static void nvme_put_ns_idx(int index)
-{
-       spin_lock(&dev_list_lock);
-       ida_remove(&nvme_index_ida, index);
-       spin_unlock(&dev_list_lock);
-}
-
 static void nvme_config_discard(struct nvme_ns *ns)
 {
        u32 logical_block_size = queue_logical_block_size(ns->queue);
@@ -1601,7 +1754,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        ns->dev = dev;
        ns->queue->queuedata = ns;
 
-       disk = alloc_disk(NVME_MINORS);
+       disk = alloc_disk(0);
        if (!disk)
                goto out_free_queue;
        ns->ns_id = nsid;
@@ -1614,12 +1767,12 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
 
        disk->major = nvme_major;
-       disk->minors = NVME_MINORS;
-       disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+       disk->first_minor = 0;
        disk->fops = &nvme_fops;
        disk->private_data = ns;
        disk->queue = ns->queue;
        disk->driverfs_dev = &dev->pci_dev->dev;
+       disk->flags = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
        set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
@@ -1635,15 +1788,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        return NULL;
 }
 
-static void nvme_ns_free(struct nvme_ns *ns)
-{
-       int index = ns->disk->first_minor / NVME_MINORS;
-       put_disk(ns->disk);
-       nvme_put_ns_idx(index);
-       blk_cleanup_queue(ns->queue);
-       kfree(ns);
-}
-
 static int set_queue_count(struct nvme_dev *dev, int count)
 {
        int status;
@@ -1659,11 +1803,12 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 {
-       return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+       return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
 }
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
+       struct nvme_queue *adminq = dev->queues[0];
        struct pci_dev *pdev = dev->pci_dev;
        int result, cpu, i, vecs, nr_io_queues, size, q_depth;
 
@@ -1690,7 +1835,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        }
 
        /* Deregister the admin queue's interrupt */
-       free_irq(dev->entry[0].vector, dev->queues[0]);
+       free_irq(dev->entry[0].vector, adminq);
 
        vecs = nr_io_queues;
        for (i = 0; i < vecs; i++)
@@ -1728,9 +1873,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
         */
        nr_io_queues = vecs;
 
-       result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+       result = queue_request_irq(dev, adminq, adminq->irqname);
        if (result) {
-               dev->queues[0]->q_suspended = 1;
+               adminq->q_suspended = 1;
                goto free_queues;
        }
 
@@ -1739,9 +1884,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
                struct nvme_queue *nvmeq = dev->queues[i];
 
-               spin_lock(&nvmeq->q_lock);
+               spin_lock_irq(&nvmeq->q_lock);
                nvme_cancel_ios(nvmeq, false);
-               spin_unlock(&nvmeq->q_lock);
+               spin_unlock_irq(&nvmeq->q_lock);
 
                nvme_free_queue(nvmeq);
                dev->queue_count--;
@@ -1782,7 +1927,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        return 0;
 
  free_queues:
-       nvme_free_queues(dev);
+       nvme_free_queues(dev, 1);
        return result;
 }
 
@@ -1794,6 +1939,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
  */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
+       struct pci_dev *pdev = dev->pci_dev;
        int res;
        unsigned nn, i;
        struct nvme_ns *ns;
@@ -1803,8 +1949,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
        dma_addr_t dma_addr;
        int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-       mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
-                                                               GFP_KERNEL);
+       mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
 
@@ -1817,13 +1962,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
        ctrl = mem;
        nn = le32_to_cpup(&ctrl->nn);
        dev->oncs = le16_to_cpup(&ctrl->oncs);
+       dev->abort_limit = ctrl->acl + 1;
        memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
        memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
        memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
        if (ctrl->mdts)
                dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
-       if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
-                       (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+       if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+                       (pdev->device == 0x0953) && ctrl->vs[3])
                dev->stripe_size = 1 << (ctrl->vs[3] + shift);
 
        id_ns = mem;
@@ -1871,16 +2017,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                goto disable;
 
-       pci_set_drvdata(pdev, dev);
        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
        if (!dev->bar)
                goto disable;
-
-       dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+       if (readl(&dev->bar->csts) == -1) {
+               result = -ENODEV;
+               goto unmap;
+       }
+       dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
        dev->dbs = ((void __iomem *)dev->bar) + 4096;
 
        return 0;
 
+ unmap:
+       iounmap(dev->bar);
+       dev->bar = NULL;
  disable:
        pci_release_regions(pdev);
  disable_pci:
@@ -1898,37 +2049,183 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
        if (dev->bar) {
                iounmap(dev->bar);
                dev->bar = NULL;
+               pci_release_regions(dev->pci_dev);
        }
 
-       pci_release_regions(dev->pci_dev);
        if (pci_is_enabled(dev->pci_dev))
                pci_disable_device(dev->pci_dev);
 }
 
+struct nvme_delq_ctx {
+       struct task_struct *waiter;
+       struct kthread_worker *worker;
+       atomic_t refcount;
+};
+
+static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
+{
+       dq->waiter = current;
+       mb();
+
+       for (;;) {
+               set_current_state(TASK_KILLABLE);
+               if (!atomic_read(&dq->refcount))
+                       break;
+               if (!schedule_timeout(ADMIN_TIMEOUT) ||
+                                       fatal_signal_pending(current)) {
+                       set_current_state(TASK_RUNNING);
+
+                       nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+                       nvme_disable_queue(dev, 0);
+
+                       send_sig(SIGKILL, dq->worker->task, 1);
+                       flush_kthread_worker(dq->worker);
+                       return;
+               }
+       }
+       set_current_state(TASK_RUNNING);
+}
+
+static void nvme_put_dq(struct nvme_delq_ctx *dq)
+{
+       atomic_dec(&dq->refcount);
+       if (dq->waiter)
+               wake_up_process(dq->waiter);
+}
+
+static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
+{
+       atomic_inc(&dq->refcount);
+       return dq;
+}
+
+static void nvme_del_queue_end(struct nvme_queue *nvmeq)
+{
+       struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
+
+       nvme_clear_queue(nvmeq);
+       nvme_put_dq(dq);
+}
+
+static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+                                               kthread_work_func_t fn)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.delete_queue.opcode = opcode;
+       c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+       init_kthread_work(&nvmeq->cmdinfo.work, fn);
+       return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+}
+
+static void nvme_del_cq_work_handler(struct kthread_work *work)
+{
+       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+                                                       cmdinfo.work);
+       nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_cq(struct nvme_queue *nvmeq)
+{
+       return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
+                                               nvme_del_cq_work_handler);
+}
+
+static void nvme_del_sq_work_handler(struct kthread_work *work)
+{
+       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+                                                       cmdinfo.work);
+       int status = nvmeq->cmdinfo.status;
+
+       if (!status)
+               status = nvme_delete_cq(nvmeq);
+       if (status)
+               nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_sq(struct nvme_queue *nvmeq)
+{
+       return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
+                                               nvme_del_sq_work_handler);
+}
+
+static void nvme_del_queue_start(struct kthread_work *work)
+{
+       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+                                                       cmdinfo.work);
+       allow_signal(SIGKILL);
+       if (nvme_delete_sq(nvmeq))
+               nvme_del_queue_end(nvmeq);
+}
+
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+       int i;
+       DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+       struct nvme_delq_ctx dq;
+       struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
+                                       &worker, "nvme%d", dev->instance);
+
+       if (IS_ERR(kworker_task)) {
+               dev_err(&dev->pci_dev->dev,
+                       "Failed to create queue del task\n");
+               for (i = dev->queue_count - 1; i > 0; i--)
+                       nvme_disable_queue(dev, i);
+               return;
+       }
+
+       dq.waiter = NULL;
+       atomic_set(&dq.refcount, 0);
+       dq.worker = &worker;
+       for (i = dev->queue_count - 1; i > 0; i--) {
+               struct nvme_queue *nvmeq = dev->queues[i];
+
+               if (nvme_suspend_queue(nvmeq))
+                       continue;
+               nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
+               nvmeq->cmdinfo.worker = dq.worker;
+               init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
+               queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
+       }
+       nvme_wait_dq(&dq, dev);
+       kthread_stop(kworker_task);
+}
+
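The teardown above is a fan-out/join: every queue delete takes a reference on the shared nvme_delq_ctx, each completion drops it through nvme_put_dq(), and nvme_wait_dq() sleeps until the count reaches zero, escalating to a forced controller disable plus a SIGKILL of the worker if ADMIN_TIMEOUT expires or a fatal signal arrives. A minimal sketch of just the wake/wait handshake, with hypothetical names (the timeout and SIGKILL escalation are left out):

/* Sketch only, not driver code: the waiter must set its task state
 * before re-checking the counter, so a wake_up_process() issued
 * between the check and the sleep is not lost. */
static void sketch_put(atomic_t *refcount, struct task_struct *waiter)
{
        atomic_dec(refcount);
        if (waiter)
                wake_up_process(waiter);        /* spurious wakes are harmless */
}

static void sketch_wait(atomic_t *refcount)
{
        for (;;) {
                set_current_state(TASK_KILLABLE);
                if (!atomic_read(refcount))
                        break;
                schedule();
        }
        set_current_state(TASK_RUNNING);
}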
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
        int i;
 
-       for (i = dev->queue_count - 1; i >= 0; i--)
-               nvme_disable_queue(dev, i);
+       dev->initialized = 0;
 
        spin_lock(&dev_list_lock);
        list_del_init(&dev->node);
        spin_unlock(&dev_list_lock);
 
-       if (dev->bar)
+       if (!dev->bar || readl(&dev->bar->csts) == -1) {
+               for (i = dev->queue_count - 1; i >= 0; i--) {
+                       struct nvme_queue *nvmeq = dev->queues[i];
+                       nvme_suspend_queue(nvmeq);
+                       nvme_clear_queue(nvmeq);
+               }
+       } else {
+               nvme_disable_io_queues(dev);
                nvme_shutdown_ctrl(dev);
+               nvme_disable_queue(dev, 0);
+       }
        nvme_dev_unmap(dev);
 }
 
 static void nvme_dev_remove(struct nvme_dev *dev)
 {
-       struct nvme_ns *ns, *next;
+       struct nvme_ns *ns;
 
-       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-               list_del(&ns->list);
-               del_gendisk(ns->disk);
-               nvme_ns_free(ns);
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               if (ns->disk->flags & GENHD_FL_UP)
+                       del_gendisk(ns->disk);
+               if (!blk_queue_dying(ns->queue))
+                       blk_cleanup_queue(ns->queue);
        }
 }
 
@@ -1985,14 +2282,22 @@ static void nvme_release_instance(struct nvme_dev *dev)
        spin_unlock(&dev_list_lock);
 }
 
+static void nvme_free_namespaces(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns, *next;
+
+       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+               list_del(&ns->list);
+               put_disk(ns->disk);
+               kfree(ns);
+       }
+}
+
 static void nvme_free_dev(struct kref *kref)
 {
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
-       nvme_dev_remove(dev);
-       nvme_dev_shutdown(dev);
-       nvme_free_queues(dev);
-       nvme_release_instance(dev);
-       nvme_release_prp_pools(dev);
+
+       nvme_free_namespaces(dev);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@ -2056,6 +2361,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
        return result;
 
  disable:
+       nvme_disable_queue(dev, 0);
        spin_lock(&dev_list_lock);
        list_del_init(&dev->node);
        spin_unlock(&dev_list_lock);
@@ -2064,6 +2370,71 @@ static int nvme_dev_start(struct nvme_dev *dev)
        return result;
 }
 
+static int nvme_remove_dead_ctrl(void *arg)
+{
+       struct nvme_dev *dev = (struct nvme_dev *)arg;
+       struct pci_dev *pdev = dev->pci_dev;
+
+       if (pci_get_drvdata(pdev))
+               pci_stop_and_remove_bus_device(pdev);
+       kref_put(&dev->kref, nvme_free_dev);
+       return 0;
+}
+
+static void nvme_remove_disks(struct work_struct *ws)
+{
+       int i;
+       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+
+       nvme_dev_remove(dev);
+       spin_lock(&dev_list_lock);
+       for (i = dev->queue_count - 1; i > 0; i--) {
+               BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
+               nvme_free_queue(dev->queues[i]);
+               dev->queue_count--;
+               dev->queues[i] = NULL;
+       }
+       spin_unlock(&dev_list_lock);
+}
+
+static int nvme_dev_resume(struct nvme_dev *dev)
+{
+       int ret;
+
+       ret = nvme_dev_start(dev);
+       if (ret && ret != -EBUSY)
+               return ret;
+       if (ret == -EBUSY) {
+               spin_lock(&dev_list_lock);
+               PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+               queue_work(nvme_workq, &dev->reset_work);
+               spin_unlock(&dev_list_lock);
+       }
+       dev->initialized = 1;
+       return 0;
+}
+
+static void nvme_dev_reset(struct nvme_dev *dev)
+{
+       nvme_dev_shutdown(dev);
+       if (nvme_dev_resume(dev)) {
+               dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+               kref_get(&dev->kref);
+               if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
+                                                       dev->instance))) {
+                       dev_err(&dev->pci_dev->dev,
+                               "Failed to start controller remove task\n");
+                       kref_put(&dev->kref, nvme_free_dev);
+               }
+       }
+}
+
+static void nvme_reset_failed_dev(struct work_struct *ws)
+{
+       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+       nvme_dev_reset(dev);
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int result = -ENOMEM;
@@ -2082,8 +2453,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto free;
 
        INIT_LIST_HEAD(&dev->namespaces);
+       INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
        dev->pci_dev = pdev;
-
+       pci_set_drvdata(pdev, dev);
        result = nvme_set_instance(dev);
        if (result)
                goto free;
@@ -2099,6 +2471,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto release_pools;
        }
 
+       kref_init(&dev->kref);
        result = nvme_dev_add(dev);
        if (result)
                goto shutdown;
@@ -2113,15 +2486,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto remove;
 
-       kref_init(&dev->kref);
+       dev->initialized = 1;
        return 0;
 
  remove:
        nvme_dev_remove(dev);
+       nvme_free_namespaces(dev);
  shutdown:
        nvme_dev_shutdown(dev);
  release_pools:
-       nvme_free_queues(dev);
+       nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
  release:
        nvme_release_instance(dev);
@@ -2132,10 +2506,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return result;
 }
 
+static void nvme_shutdown(struct pci_dev *pdev)
+{
+       struct nvme_dev *dev = pci_get_drvdata(pdev);
+       nvme_dev_shutdown(dev);
+}
+
 static void nvme_remove(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+       spin_lock(&dev_list_lock);
+       list_del_init(&dev->node);
+       spin_unlock(&dev_list_lock);
+
+       pci_set_drvdata(pdev, NULL);
+       flush_work(&dev->reset_work);
        misc_deregister(&dev->miscdev);
+       nvme_dev_remove(dev);
+       nvme_dev_shutdown(dev);
+       nvme_free_queues(dev, 0);
+       nvme_release_instance(dev);
+       nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
 }
 
@@ -2159,13 +2551,12 @@ static int nvme_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
-       int ret;
 
-       ret = nvme_dev_start(ndev);
-       /* XXX: should remove gendisks if resume fails */
-       if (ret)
-               nvme_free_queues(ndev);
-       return ret;
+       if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
+               PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+               queue_work(nvme_workq, &ndev->reset_work);
+       }
+       return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2192,6 +2583,7 @@ static struct pci_driver nvme_driver = {
        .id_table       = nvme_id_table,
        .probe          = nvme_probe,
        .remove         = nvme_remove,
+       .shutdown       = nvme_shutdown,
        .driver         = {
                .pm     = &nvme_dev_pm_ops,
        },
@@ -2206,9 +2598,14 @@ static int __init nvme_init(void)
        if (IS_ERR(nvme_thread))
                return PTR_ERR(nvme_thread);
 
+       result = -ENOMEM;
+       nvme_workq = create_singlethread_workqueue("nvme");
+       if (!nvme_workq)
+               goto kill_kthread;
+
        result = register_blkdev(nvme_major, "nvme");
        if (result < 0)
-               goto kill_kthread;
+               goto kill_workq;
        else if (result > 0)
                nvme_major = result;
 
@@ -2219,6 +2616,8 @@ static int __init nvme_init(void)
 
  unregister_blkdev:
        unregister_blkdev(nvme_major, "nvme");
+ kill_workq:
+       destroy_workqueue(nvme_workq);
  kill_kthread:
        kthread_stop(nvme_thread);
        return result;
@@ -2228,6 +2627,7 @@ static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
        unregister_blkdev(nvme_major, "nvme");
+       destroy_workqueue(nvme_workq);
        kthread_stop(nvme_thread);
 }
 
index 4a4ff4e..4a0ceb6 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/bio.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
        return retcode;
 }
 
+#ifdef CONFIG_COMPAT
+typedef struct sg_io_hdr32 {
+       compat_int_t interface_id;      /* [i] 'S' for SCSI generic (required) */
+       compat_int_t dxfer_direction;   /* [i] data transfer direction  */
+       unsigned char cmd_len;          /* [i] SCSI command length ( <= 16 bytes) */
+       unsigned char mx_sb_len;                /* [i] max length to write to sbp */
+       unsigned short iovec_count;     /* [i] 0 implies no scatter gather */
+       compat_uint_t dxfer_len;                /* [i] byte count of data transfer */
+       compat_uint_t dxferp;           /* [i], [*io] points to data transfer memory
+                                             or scatter gather list */
+       compat_uptr_t cmdp;             /* [i], [*i] points to command to perform */
+       compat_uptr_t sbp;              /* [i], [*o] points to sense_buffer memory */
+       compat_uint_t timeout;          /* [i] MAX_UINT->no timeout (unit: millisec) */
+       compat_uint_t flags;            /* [i] 0 -> default, see SG_FLAG... */
+       compat_int_t pack_id;           /* [i->o] unused internally (normally) */
+       compat_uptr_t usr_ptr;          /* [i->o] unused internally */
+       unsigned char status;           /* [o] scsi status */
+       unsigned char masked_status;    /* [o] shifted, masked scsi status */
+       unsigned char msg_status;               /* [o] messaging level data (optional) */
+       unsigned char sb_len_wr;                /* [o] byte count actually written to sbp */
+       unsigned short host_status;     /* [o] errors from host adapter */
+       unsigned short driver_status;   /* [o] errors from software driver */
+       compat_int_t resid;             /* [o] dxfer_len - actual_transferred */
+       compat_uint_t duration;         /* [o] time taken by cmd (unit: millisec) */
+       compat_uint_t info;             /* [o] auxiliary information */
+} sg_io_hdr32_t;  /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+       compat_uint_t iov_base;
+       compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+       sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+       sg_iovec32_t __user *iov32 = dxferp;
+       int i;
+
+       for (i = 0; i < iovec_count; i++) {
+               u32 base, len;
+
+               if (get_user(base, &iov32[i].iov_base) ||
+                   get_user(len, &iov32[i].iov_len) ||
+                   put_user(compat_ptr(base), &iov[i].iov_base) ||
+                   put_user(len, &iov[i].iov_len))
+                       return -EFAULT;
+       }
+
+       if (put_user(iov, &sgio->dxferp))
+               return -EFAULT;
+       return 0;
+}
+
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
+{
+       sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
+       sg_io_hdr_t __user *sgio;
+       u16 iovec_count;
+       u32 data;
+       void __user *dxferp;
+       int err;
+       int interface_id;
+
+       if (get_user(interface_id, &sgio32->interface_id))
+               return -EFAULT;
+       if (interface_id != 'S')
+               return -EINVAL;
+
+       if (get_user(iovec_count, &sgio32->iovec_count))
+               return -EFAULT;
+
+       {
+               void __user *top = compat_alloc_user_space(0);
+               void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+                                      (iovec_count * sizeof(sg_iovec_t)));
+               if (new > top)
+                       return -EINVAL;
+
+               sgio = new;
+       }
+
+       /* Ok, now construct.  */
+       if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+                        (2 * sizeof(int)) +
+                        (2 * sizeof(unsigned char)) +
+                        (1 * sizeof(unsigned short)) +
+                        (1 * sizeof(unsigned int))))
+               return -EFAULT;
+
+       if (get_user(data, &sgio32->dxferp))
+               return -EFAULT;
+       dxferp = compat_ptr(data);
+       if (iovec_count) {
+               if (sg_build_iovec(sgio, dxferp, iovec_count))
+                       return -EFAULT;
+       } else {
+               if (put_user(dxferp, &sgio->dxferp))
+                       return -EFAULT;
+       }
+
+       {
+               unsigned char __user *cmdp;
+               unsigned char __user *sbp;
+
+               if (get_user(data, &sgio32->cmdp))
+                       return -EFAULT;
+               cmdp = compat_ptr(data);
+
+               if (get_user(data, &sgio32->sbp))
+                       return -EFAULT;
+               sbp = compat_ptr(data);
+
+               if (put_user(cmdp, &sgio->cmdp) ||
+                   put_user(sbp, &sgio->sbp))
+                       return -EFAULT;
+       }
+
+       if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+                        3 * sizeof(int)))
+               return -EFAULT;
+
+       if (get_user(data, &sgio32->usr_ptr))
+               return -EFAULT;
+       if (put_user(compat_ptr(data), &sgio->usr_ptr))
+               return -EFAULT;
+
+       err = nvme_sg_io(ns, sgio);
+       if (err >= 0) {
+               void __user *datap;
+
+               if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+                                sizeof(int)) ||
+                   get_user(datap, &sgio->usr_ptr) ||
+                   put_user((u32)(unsigned long)datap,
+                            &sgio32->usr_ptr) ||
+                   copy_in_user(&sgio32->status, &sgio->status,
+                                (4 * sizeof(unsigned char)) +
+                                (2 * sizeof(unsigned short)) +
+                                (3 * sizeof(int))))
+                       err = -EFAULT;
+       }
+
+       return err;
+}
+#endif
+
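This translation layer only matters once it is reachable from the compat ioctl path; that wiring lands in the core driver rather than in this hunk. A hypothetical sketch of a dispatcher that would forward a 32-bit SG_IO to the helper above (names and placement are illustrative, not this series' code):

#include <scsi/sg.h>

/* Hypothetical compat dispatcher; assumes ns was stashed in the
 * gendisk's private_data, as the native ioctl path does. */
static int example_compat_ioctl(struct block_device *bdev, fmode_t mode,
                                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns = bdev->bd_disk->private_data;

        if (cmd == SG_IO)
                return nvme_sg_io32(ns, arg);
        return -ENOIOCTLCMD;
}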
 int nvme_sg_get_version_num(int __user *ip)
 {
        return put_user(sg_version_num, ip);
index da18046..4b97b86 100644 (file)
@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 
                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {
-                       ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+                       ret = gnttab_unmap_refs(unmap, NULL, pages,
+                               segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
                pages[segs_to_unmap] = persistent_gnt->page;
 
                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-                       ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+                       ret = gnttab_unmap_refs(unmap, NULL, pages,
+                               segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
-               ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+               ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
                BUG_ON(ret);
                put_free_pages(blkif, pages, segs_to_unmap);
        }
@@ -668,14 +670,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-                       ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+                       ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+                                               invcount);
                        BUG_ON(ret);
                        put_free_pages(blkif, unmap_pages, invcount);
                        invcount = 0;
                }
        }
        if (invcount) {
-               ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+               ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                BUG_ON(ret);
                put_free_pages(blkif, unmap_pages, invcount);
        }
@@ -737,7 +740,7 @@ again:
        }
 
        if (segs_to_map) {
-               ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
+               ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }
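The extra NULL threaded through these calls is the new kernel-mapping operations array that the grant-table helpers grow in this series; blkback passes NULL because it only needs the regular host mappings. Reconstructed prototypes (hedged; verify against include/xen/grant_table.h of this kernel):

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_map_grant_ref *kmap_ops,
                      struct page **pages, unsigned int count);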
 
index 7e257b2..79606f4 100644 (file)
@@ -57,6 +57,7 @@ struct sample {
        int32_t core_pct_busy;
        u64 aperf;
        u64 mperf;
+       unsigned long long tsc;
        int freq;
 };
 
@@ -96,6 +97,7 @@ struct cpudata {
 
        u64     prev_aperf;
        u64     prev_mperf;
+       unsigned long long prev_tsc;
        int     sample_ptr;
        struct sample samples[SAMPLE_COUNT];
 };
@@ -548,30 +550,41 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
 {
        u64 core_pct;
-       core_pct = div64_u64(int_tofp(sample->aperf * 100),
-                            sample->mperf);
-       sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+       u64 c0_pct;
 
-       sample->core_pct_busy = core_pct;
+       core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+
+       c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+       sample->freq = fp_toint(
+               mul_fp(int_tofp(cpu->pstate.max_pstate),
+                       int_tofp(core_pct * 1000)));
+
+       sample->core_pct_busy = mul_fp(int_tofp(core_pct),
+                               div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
        u64 aperf, mperf;
+       unsigned long long tsc;
 
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
+       tsc = native_read_tsc();
 
        cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
        cpu->samples[cpu->sample_ptr].aperf = aperf;
        cpu->samples[cpu->sample_ptr].mperf = mperf;
+       cpu->samples[cpu->sample_ptr].tsc = tsc;
        cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
        cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+       cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
 
        intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
 
        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
+       cpu->prev_tsc = tsc;
 }
 
 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
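The rework above separates two ratios the old code conflated: APERF/MPERF is how fast the core ran relative to its guaranteed frequency, while MPERF/TSC is how much of the sample interval the core actually spent in C0. A plain-integer sketch of the same arithmetic with hypothetical counter deltas (the driver does this in fixed point):

#include <stdio.h>

int main(void)
{
        /* hypothetical deltas for one sample interval */
        unsigned long long aperf = 180000;      /* cycles at actual freq */
        unsigned long long mperf = 200000;      /* cycles at guaranteed freq */
        unsigned long long tsc   = 400000;      /* wall time in TSC ticks */

        unsigned long long core_pct = aperf * 100 / mperf;      /* 90 */
        unsigned long long c0_pct   = mperf * 100 / tsc;        /* 50 */

        /* busy is the frequency ratio scaled by C0 residency; the +1
         * mirrors the driver's formula */
        printf("busy = %llu%%\n", core_pct * (c0_pct + 1) / 100); /* 45 */
        return 0;
}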
index 3f65dd6..a28640f 100644 (file)
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
         * then the BO is being moved and we should
         * store up the damage until later.
         */
-       if (!drm_can_sleep())
+       if (drm_can_sleep())
                ret = ast_bo_reserve(bo, true);
        if (ret) {
                if (ret != -EBUSY)
index 2fd4a92..32bbba0 100644 (file)
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
         * then the BO is being moved and we should
         * store up the damage until later.
         */
-       if (!drm_can_sleep())
+       if (drm_can_sleep())
                ret = cirrus_bo_reserve(bo, true);
        if (ret) {
                if (ret != -EBUSY)
index f9adc27..13b7dd8 100644 (file)
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
         * then the BO is being moved and we should
         * store up the damage until later.
         */
-       if (!drm_can_sleep())
+       if (drm_can_sleep())
                ret = mgag200_bo_reserve(bo, true);
        if (ret) {
                if (ret != -EBUSY)
index b8583f2..9683747 100644 (file)
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
                (mga_vga_calculate_mode_bandwidth(mode, bpp)
                        > (32700 * 1024))) {
                return MODE_BANDWIDTH;
-       } else if (mode->type == G200_EH &&
+       } else if (mdev->type == G200_EH &&
                (mga_vga_calculate_mode_bandwidth(mode, bpp)
                        > (37500 * 1024))) {
                return MODE_BANDWIDTH;
-       } else if (mode->type == G200_ER &&
+       } else if (mdev->type == G200_ER &&
                (mga_vga_calculate_mode_bandwidth(mode,
                        bpp) > (55000 * 1024))) {
                return MODE_BANDWIDTH;
index 7b399dc..2812c7d 100644 (file)
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case R_008C64_SQ_VSTMP_RING_SIZE:
        case R_0288C8_SQ_GS_VERT_ITEMSIZE:
                /* get value to populate the IB don't remove */
-               tmp =radeon_get_ib_value(p, idx);
-               ib[idx] = 0;
+               /*tmp =radeon_get_ib_value(p, idx);
+                 ib[idx] = 0;*/
+               break;
+       case SQ_ESGS_RING_BASE:
+       case SQ_GSVS_RING_BASE:
+       case SQ_ESTMP_RING_BASE:
+       case SQ_GSTMP_RING_BASE:
+       case SQ_PSTMP_RING_BASE:
+       case SQ_VSTMP_RING_BASE:
+               r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+               if (r) {
+                       dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                       "0x%04X\n", reg);
+                       return -EINVAL;
+               }
+               ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                break;
        case SQ_CONFIG:
                track->sq_config = radeon_get_ib_value(p, idx);
index ec8c388..84a1bbb 100644 (file)
  *   2.34.0 - Add CIK tiling mode array query
  *   2.35.0 - Add CIK macrotile mode array query
  *   2.36.0 - Fix CIK DCE tiling setup
+ *   2.37.0 - allow GS ring setup on r6xx/r7xx
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       36
+#define KMS_DRIVER_MINOR       37
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 20bfbda..ec0c682 100644 (file)
@@ -18,6 +18,7 @@ r600 0x9400
 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
 0x00028A40 VGT_GS_MODE
 0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
 0x000088C8 VGT_GS_PER_ES
 0x000088E8 VGT_GS_PER_VS
 0x000088D4 VGT_GS_VERTEX_REUSE
index 3707985..53b51c4 100644 (file)
@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-                       if (!kref_get_unless_zero(&ref->kref)) {
+                       if (kref_get_unless_zero(&ref->kref)) {
                                rcu_read_unlock();
                                break;
                        }
index 9af9908..75f3190 100644 (file)
@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
        pgoff_t i;
        struct page **page = ttm->pages;
 
+       if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+               return;
+
        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
index d95335c..b645647 100644 (file)
@@ -2583,4 +2583,28 @@ typedef union {
    float  f;
 } SVGA3dDevCapResult;
 
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */
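The header above makes the caps block self-describing: each record leads with its length in 32-bit words followed by its type, so a reader can skip records it does not understand. A sketch of such a walk, assuming header.length counts the whole record including the header and that a zero length terminates the block (hedged; confirm against the device documentation):

/* Sketch: find a caps record of a given type in a caps block of
 * numWords 32-bit words. */
static const SVGA3dCapsRecord *
caps_find(const uint32 *caps, uint32 numWords, SVGA3dCapsRecordType type)
{
        const uint32 *end = caps + numWords;

        while (caps + 2 <= end && caps[0] != 0) {
                const SVGA3dCapsRecord *rec = (const SVGA3dCapsRecord *)caps;

                if (rec->header.type == type)
                        return rec;
                caps += rec->header.length;     /* skip whole record */
        }
        return NULL;
}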
index 82c41da..9426c53 100644 (file)
@@ -37,7 +37,7 @@ struct vmw_user_context {
 
 
 
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+                                          bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
 static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
        if (res->func->destroy == vmw_gb_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
+               mutex_lock(&dev_priv->binding_mutex);
+               (void) vmw_context_binding_state_kill
+                       (&container_of(res, struct vmw_user_context, res)->cbs);
                (void) vmw_gb_context_destroy(res);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+               mutex_unlock(&dev_priv->binding_mutex);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                return;
        }
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
        mutex_lock(&dev_priv->binding_mutex);
-       vmw_context_binding_state_kill(&uctx->cbs);
+       vmw_context_binding_state_scrub(&uctx->cbs);
 
        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;
-       struct vmw_user_context *uctx =
-               container_of(res, struct vmw_user_context, res);
-
-       BUG_ON(!list_empty(&uctx->cbs.list));
 
        if (likely(res->id == -1))
                return 0;
@@ -528,8 +530,9 @@ out_unlock:
  * vmw_context_scrub_shader - scrub a shader binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = bi->i1.shader_type;
-       cmd->body.shid = SVGA3D_INVALID_ID;
+       cmd->body.shid =
+               cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
        return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
  * from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+                                          bool rebind)
 {
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = bi->i1.rt_type;
-       cmd->body.target.sid = SVGA3D_INVALID_ID;
+       cmd->body.target.sid =
+               cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        cmd->body.target.face = 0;
        cmd->body.target.mipmap = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
  * vmw_context_scrub_texture - scrub a texture binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  *
  * TODO: Possibly complement this function with a function that takes
  * a list of texture bindings and combines them to a single command.
  */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+                                    bool rebind)
 {
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
        cmd->body.c.cid = bi->ctx->id;
        cmd->body.s1.stage = bi->i1.texture_stage;
        cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-       cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+       cmd->body.s1.value =
+               cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
        return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
                vmw_context_binding_drop(loc);
 
        loc->bi = *bi;
+       loc->bi.scrubbed = false;
        list_add_tail(&loc->ctx_list, &cbs->list);
        INIT_LIST_HEAD(&loc->res_list);
 
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
        if (loc->bi.ctx != NULL)
                vmw_context_binding_drop(loc);
 
-       loc->bi = *bi;
-       list_add_tail(&loc->ctx_list, &cbs->list);
-       if (bi->res != NULL)
+       if (bi->res != NULL) {
+               loc->bi = *bi;
+               list_add_tail(&loc->ctx_list, &cbs->list);
                list_add_tail(&loc->res_list, &bi->res->binding_head);
-       else
-               INIT_LIST_HEAD(&loc->res_list);
+       }
 }
 
 /**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
  */
 static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
 {
-       (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+       if (!cb->bi.scrubbed) {
+               (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+               cb->bi.scrubbed = true;
+       }
        vmw_context_binding_drop(cb);
 }
 
@@ -767,6 +780,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
                vmw_context_binding_kill(entry);
 }
 
+/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+       struct vmw_ctx_binding *entry;
+
+       list_for_each_entry(entry, &cbs->list, ctx_list) {
+               if (!entry->bi.scrubbed) {
+                       (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+                       entry->bi.scrubbed = true;
+               }
+       }
+}
+
 /**
  * vmw_context_binding_res_list_kill - Kill all bindings on a
  * resource binding list
@@ -784,6 +818,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
                vmw_context_binding_kill(entry);
 }
 
+/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+       struct vmw_ctx_binding *entry;
+
+       list_for_each_entry(entry, head, res_list) {
+               if (!entry->bi.scrubbed) {
+                       (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+                       entry->bi.scrubbed = true;
+               }
+       }
+}
+
 /**
  * vmw_context_binding_state_transfer - Commit staged binding info
  *
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
        list_for_each_entry_safe(entry, next, &from->list, ctx_list)
                vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+       struct vmw_ctx_binding *entry;
+       struct vmw_user_context *uctx =
+               container_of(ctx, struct vmw_user_context, res);
+       struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+       int ret;
+
+       list_for_each_entry(entry, &cbs->list, ctx_list) {
+               if (likely(!entry->bi.scrubbed))
+                       continue;
+
+               if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+                           SVGA3D_INVALID_ID))
+                       continue;
+
+               ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               entry->bi.scrubbed = false;
+       }
+
+       return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+       return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
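Given the staleness caveat above, any traversal of the returned list has to happen entirely under binding_mutex, the way vmw_resource_context_res_add() does in the execbuf patch below. A hypothetical caller for illustration:

/* Hypothetical: inspect a context's current bindings while they are
 * guaranteed stable; do not keep entry pointers past the unlock. */
static void example_walk_bindings(struct vmw_private *dev_priv,
                                  struct vmw_resource *ctx)
{
        struct vmw_ctx_binding *entry;

        mutex_lock(&dev_priv->binding_mutex);
        list_for_each_entry(entry, vmw_context_binding_list(ctx), ctx_list)
                pr_info("binding type %d scrubbed %d\n",
                        entry->bi.bt, entry->bi.scrubbed);
        mutex_unlock(&dev_priv->binding_mutex);
}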
index 9893328..3bdc0ad 100644 (file)
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
                drm_master_put(&vmw_fp->locked_master);
        }
 
+       vmw_compat_shader_man_destroy(vmw_fp->shman);
        ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
 }
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;
 
+       vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+       if (IS_ERR(vmw_fp->shman))
+               goto out_no_shman;
+
        file_priv->driver_priv = vmw_fp;
        dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
        return 0;
 
+out_no_shman:
+       ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
        kfree(vmw_fp);
        return ret;
index 554e7fa..ecaa302 100644 (file)
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
        struct drm_master *locked_master;
        struct ttm_object_file *tfile;
        struct list_head fence_events;
+       bool gb_aware;
+       struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
        struct vmw_resource *ctx;
        struct vmw_resource *res;
        enum vmw_ctx_binding_type bt;
+       bool scrubbed;
        union {
                SVGA3dShaderType shader_type;
                SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
        struct drm_open_hash res_ht;
        bool res_ht_initialized;
        bool kernel; /**< is the call made from the kernel */
-       struct ttm_object_file *tfile;
+       struct vmw_fpriv *fp;
        struct list_head validate_nodes;
        struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
        uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
        bool needs_post_query_barrier;
        struct vmw_resource *error_resource;
        struct vmw_ctx_binding_state staged_bindings;
+       struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;
 
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
 vmw_context_binding_state_transfer(struct vmw_resource *res,
                                   struct vmw_ctx_binding_state *cbs);
 extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
 /*
  * Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+                                   SVGA3dShaderType shader_type,
+                                   u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+                                     struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+                                     struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+                                   u32 user_key,
+                                   SVGA3dShaderType shader_type,
+                                   struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+                                u32 user_key, const void *bytecode,
+                                SVGA3dShaderType shader_type,
+                                size_t size,
+                                struct ttm_object_file *tfile,
+                                struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
 
 /**
  * Inline helper functions
index 7a5f1eb..269b85c 100644 (file)
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
-                       vmw_context_binding_state_transfer
-                               (val->res, val->staged_bindings);
+                       if (!backoff) {
+                               vmw_context_binding_state_transfer
+                                       (val->res, val->staged_bindings);
+                       }
                        kfree(val->staged_bindings);
                        val->staged_bindings = NULL;
                }
@@ -177,6 +179,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
        return 0;
 }
 
+/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission.
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+                                       struct vmw_sw_context *sw_context,
+                                       struct vmw_resource *ctx)
+{
+       struct list_head *binding_list;
+       struct vmw_ctx_binding *entry;
+       int ret = 0;
+       struct vmw_resource *res;
+
+       mutex_lock(&dev_priv->binding_mutex);
+       binding_list = vmw_context_binding_list(ctx);
+
+       list_for_each_entry(entry, binding_list, ctx_list) {
+               res = vmw_resource_reference_unless_doomed(entry->bi.res);
+               if (unlikely(res == NULL))
+                       continue;
+
+               ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+               vmw_resource_unreference(&res);
+               if (unlikely(ret != 0))
+                       break;
+       }
+
+       mutex_unlock(&dev_priv->binding_mutex);
+       return ret;
+}
+
 /**
  * vmw_resource_relocation_add - Add a relocation to the relocation list
  *
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
        struct vmw_resource_relocation *rel;
 
-       list_for_each_entry(rel, list, head)
-               cb[rel->offset] = rel->res->id;
+       list_for_each_entry(rel, list, head) {
+               if (likely(rel->res != NULL))
+                       cb[rel->offset] = rel->res->id;
+               else
+                       cb[rel->offset] = SVGA_3D_CMD_NOP;
+       }
 }
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
  * @sw_context: Pointer to the software context.
  * @res_type: Resource type.
  * @converter: User-space visible type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
  * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
  */
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
-                            struct vmw_sw_context *sw_context,
-                            enum vmw_res_type res_type,
-                            const struct vmw_user_resource_conv *converter,
-                            uint32_t *id,
-                            struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+                        struct vmw_sw_context *sw_context,
+                        enum vmw_res_type res_type,
+                        const struct vmw_user_resource_conv *converter,
+                        uint32_t id,
+                        uint32_t *id_loc,
+                        struct vmw_resource_val_node **p_val)
 {
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
        struct vmw_resource_val_node *node;
        int ret;
 
-       if (*id == SVGA3D_INVALID_ID) {
+       if (id == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
         * resource
         */
 
-       if (likely(rcache->valid && *id == rcache->handle)) {
+       if (likely(rcache->valid && id == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;
 
                rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 
                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
-                        id - sw_context->buf_start);
+                        id_loc - sw_context->buf_start);
        }
 
        ret = vmw_user_resource_lookup_handle(dev_priv,
-                                             sw_context->tfile,
-                                             *id,
+                                             sw_context->fp->tfile,
+                                             id,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
-                         (unsigned) *id);
+                         (unsigned) id);
                dump_stack();
                return ret;
        }
 
        rcache->valid = true;
        rcache->res = res;
-       rcache->handle = *id;
+       rcache->handle = id;
 
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
-                                         id - sw_context->buf_start);
+                                         id_loc - sw_context->buf_start);
        if (unlikely(ret != 0))
                goto out_no_reloc;
 
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
        if (p_val)
                *p_val = node;
 
-       if (node->first_usage && res_type == vmw_res_context) {
+       if (dev_priv->has_mob && node->first_usage &&
+           res_type == vmw_res_context) {
+               ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+               if (unlikely(ret != 0))
+                       goto out_no_reloc;
                node->staged_bindings =
                        kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
                if (node->staged_bindings == NULL) {
@@ -480,6 +533,59 @@ out_no_reloc:
        return ret;
 }
 
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+                 struct vmw_sw_context *sw_context,
+                 enum vmw_res_type res_type,
+                 const struct vmw_user_resource_conv *converter,
+                 uint32_t *id_loc,
+                 struct vmw_resource_val_node **p_val)
+{
+       return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+                                       converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+       struct vmw_resource_val_node *val;
+       int ret;
+
+       list_for_each_entry(val, &sw_context->resource_list, head) {
+               if (likely(!val->staged_bindings))
+                       continue;
+
+               ret = vmw_context_rebind_all(val->res);
+               if (unlikely(ret != 0)) {
+                       if (ret != -ERESTARTSYS)
+                               DRM_ERROR("Failed to rebind context.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 /**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        struct vmw_relocation *reloc;
        int ret;
 
-       ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+       ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        struct vmw_relocation *reloc;
        int ret;
 
-       ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+       ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 
        srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-       vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+       vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+                            header);
 
 out_no_surface:
        vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
                                 &cmd->body.sid, NULL);
 }
 
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                SVGA3dCmdHeader *header)
+{
+       struct vmw_shader_define_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineShader body;
+       } *cmd;
+       int ret;
+       size_t size;
+
+       cmd = container_of(header, struct vmw_shader_define_cmd,
+                          header);
+
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                               user_context_converter, &cmd->body.cid,
+                               NULL);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (unlikely(!dev_priv->has_mob))
+               return 0;
+
+       size = cmd->header.size - sizeof(cmd->body);
+       ret = vmw_compat_shader_add(sw_context->fp->shman,
+                                   cmd->body.shid, cmd + 1,
+                                   cmd->body.type, size,
+                                   sw_context->fp->tfile,
+                                   &sw_context->staged_shaders);
+       if (unlikely(ret != 0))
+               return ret;
+
+       return vmw_resource_relocation_add(&sw_context->res_relocations,
+                                          NULL, &cmd->header.id -
+                                          sw_context->buf_start);
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+       struct vmw_shader_destroy_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDestroyShader body;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_shader_destroy_cmd,
+                          header);
+
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                               user_context_converter, &cmd->body.cid,
+                               NULL);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (unlikely(!dev_priv->has_mob))
+               return 0;
+
+       ret = vmw_compat_shader_remove(sw_context->fp->shman,
+                                      cmd->body.shid,
+                                      cmd->body.type,
+                                      &sw_context->staged_shaders);
+       if (unlikely(ret != 0))
+               return ret;
+
+       return vmw_resource_relocation_add(&sw_context->res_relocations,
+                                          NULL, &cmd->header.id -
+                                          sw_context->buf_start);
+}
+
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo bi;
                struct vmw_resource_val_node *res_node;
-
-               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-                                       user_shader_converter,
-                                       &cmd->body.shid, &res_node);
+               u32 shid = cmd->body.shid;
+
+               if (shid != SVGA3D_INVALID_ID)
+                       (void) vmw_compat_shader_lookup(sw_context->fp->shman,
+                                                       cmd->body.type,
+                                                       &shid);
+
+               ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+                                              vmw_res_shader,
+                                              user_shader_converter,
+                                              shid,
+                                              &cmd->body.shid, &res_node);
                if (unlikely(ret != 0))
                        return ret;
 
@@ -1526,6 +1733,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
        return 0;
 }
 
+/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+                                   struct vmw_sw_context *sw_context,
+                                   SVGA3dCmdHeader *header)
+{
+       struct vmw_set_shader_const_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetShaderConst body;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_set_shader_const_cmd,
+                          header);
+
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                               user_context_converter, &cmd->body.cid,
+                               NULL);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (dev_priv->has_mob)
+               header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+       return 0;
+}
+
 /**
  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
  * command
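
The SET_SHADER_CONST handler above needs no relocation at all: on
guest-backed hardware it rewrites the command id in place so the device
executes the GB-inline variant of the same body. A stand-alone sketch of
that in-place translation, with made-up command ids:

#include <stdint.h>

struct cmd_header {
	uint32_t id;
	uint32_t size;	/* body size in bytes, header excluded */
};

/* Illustrative opcodes, not the real SVGA values. */
enum { CMD_SET_SHADER_CONST = 33, CMD_SET_GB_SHADERCONSTS_INLINE = 1156 };

static void translate_legacy_cmd(struct cmd_header *header, int has_mob)
{
	/* The body layout is compatible, so only the opcode changes. */
	if (has_mob && header->id == CMD_SET_SHADER_CONST)
		header->id = CMD_SET_GB_SHADERCONSTS_INLINE;
}
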
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
                    false, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
-                   true, true, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
-                   true, true, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+                   true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
                    true, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
-                   true, true, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+                   true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        } else
                sw_context->kernel = true;
 
-       sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+       sw_context->fp = vmw_fpriv(file_priv);
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;
        sw_context->fence_flags = 0;
@@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                        goto out_unlock;
                sw_context->res_ht_initialized = true;
        }
+       INIT_LIST_HEAD(&sw_context->staged_shaders);
 
        INIT_LIST_HEAD(&resource_list);
        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                command_size);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = vmw_resources_reserve(sw_context);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
        if (unlikely(ret != 0))
@@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                goto out_err;
        }
 
+       if (dev_priv->has_mob) {
+               ret = vmw_rebind_contexts(sw_context);
+               if (unlikely(ret != 0))
+                       goto out_err;
+       }
+
        cmd = vmw_fifo_reserve(dev_priv, command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        }
 
        list_splice_init(&sw_context->resource_list, &resource_list);
+       vmw_compat_shaders_commit(sw_context->fp->shman,
+                                 &sw_context->staged_shaders);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
 
        /*
@@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 out_unlock_binding:
        mutex_unlock(&dev_priv->binding_mutex);
 out_err:
-       vmw_resource_relocations_free(&sw_context->res_relocations);
-       vmw_free_relocations(sw_context);
        ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
        vmw_resource_list_unreserve(&sw_context->resource_list, true);
+       vmw_resource_relocations_free(&sw_context->res_relocations);
+       vmw_free_relocations(sw_context);
        vmw_clear_validations(sw_context);
        if (unlikely(dev_priv->pinned_bo != NULL &&
                     !dev_priv->query_cid_valid))
@@ -2301,6 +2551,8 @@ out_unlock:
        list_splice_init(&sw_context->resource_list, &resource_list);
        error_resource = sw_context->error_resource;
        sw_context->error_resource = NULL;
+       vmw_compat_shaders_revert(sw_context->fp->shman,
+                                 &sw_context->staged_shaders);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
 
        /*
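
The relabelled error paths above split the unwind in two: anything that
fails before buffers are reserved jumps past the reservation backoff,
while the relocation and validation cleanup runs on every failure. A
compilable sketch of the resulting goto ladder, with placeholder steps:

/* Stubs standing in for the real steps; all names are placeholders. */
static int check_commands(void)        { return 0; }
static int reserve_buffers(void)       { return 0; }
static int validate_and_submit(void)   { return 0; }
static void backoff_reservations(void) { }
static void free_relocations(void)     { }

static int process_sketch(void)
{
	int ret;

	ret = check_commands();          /* nothing reserved yet */
	if (ret != 0)
		goto out_nores;
	ret = reserve_buffers();
	if (ret != 0)
		goto out_nores;
	ret = validate_and_submit();
	if (ret != 0)
		goto out_err;            /* buffers reserved: back off first */
	return 0;
out_err:
	backoff_reservations();
out_nores:
	free_relocations();              /* safe on every path */
	return ret;
}
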
index 116c497..f9881f9 100644 (file)
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
 
+struct svga_3d_compat_cap {
+       SVGA3dCapsRecordHeader header;
+       SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_getparam_arg *param =
            (struct drm_vmw_getparam_arg *)data;
+       struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
        switch (param->param) {
        case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+               if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+                       param->value = SVGA3D_HWVERSION_WS8_B1;
+                       break;
+               }
+
                param->value =
                        ioread32(fifo_mem +
                                 ((fifo->capabilities &
@@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                break;
        }
        case DRM_VMW_PARAM_MAX_SURF_MEMORY:
-               param->value = dev_priv->memory_size;
+               if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+                   !vmw_fp->gb_aware)
+                       param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+               else
+                       param->value = dev_priv->memory_size;
                break;
        case DRM_VMW_PARAM_3D_CAPS_SIZE:
-               if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
-                       param->value = SVGA3D_DEVCAP_MAX;
+               if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+                   vmw_fp->gb_aware)
+                       param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+               else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+                       param->value = sizeof(struct svga_3d_compat_cap) +
+                               sizeof(uint32_t);
                else
                        param->value = (SVGA_FIFO_3D_CAPS_LAST -
-                                       SVGA_FIFO_3D_CAPS + 1);
-               param->value *= sizeof(uint32_t);
+                                       SVGA_FIFO_3D_CAPS + 1) *
+                               sizeof(uint32_t);
                break;
        case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+               vmw_fp->gb_aware = true;
                param->value = dev_priv->max_mob_pages * PAGE_SIZE;
                break;
        default:
@@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+                              size_t size)
+{
+       struct svga_3d_compat_cap *compat_cap =
+               (struct svga_3d_compat_cap *) bounce;
+       unsigned int i;
+       size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+       unsigned int max_size;
+
+       if (size < pair_offset)
+               return -EINVAL;
+
+       max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+       if (max_size > SVGA3D_DEVCAP_MAX)
+               max_size = SVGA3D_DEVCAP_MAX;
+
+       compat_cap->header.length =
+               (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+       compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+       mutex_lock(&dev_priv->hw_mutex);
+       for (i = 0; i < max_size; ++i) {
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+               compat_cap->pairs[i][0] = i;
+               compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+       }
+       mutex_unlock(&dev_priv->hw_mutex);
+
+       return 0;
+}
 
 int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
@@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
        void *bounce;
        int ret;
        bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+       struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
        if (unlikely(arg->pad64 != 0)) {
                DRM_ERROR("Illegal GET_3D_CAP argument.\n");
                return -EINVAL;
        }
 
-       if (gb_objects)
-               size = SVGA3D_DEVCAP_MAX;
+       if (gb_objects && vmw_fp->gb_aware)
+               size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+       else if (gb_objects)
+               size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
        else
-               size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
-       size *= sizeof(uint32_t);
+               size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+                       sizeof(uint32_t);
 
        if (arg->max_size < size)
                size = arg->max_size;
 
-       bounce = vmalloc(size);
+       bounce = vzalloc(size);
        if (unlikely(bounce == NULL)) {
                DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
                return -ENOMEM;
        }
 
-       if (gb_objects) {
-               int i;
+       if (gb_objects && vmw_fp->gb_aware) {
+               int i, num;
                uint32_t *bounce32 = (uint32_t *) bounce;
 
+               num = size / sizeof(uint32_t);
+               if (num > SVGA3D_DEVCAP_MAX)
+                       num = SVGA3D_DEVCAP_MAX;
+
                mutex_lock(&dev_priv->hw_mutex);
-               for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+               for (i = 0; i < num; ++i) {
                        vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                        *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                }
                mutex_unlock(&dev_priv->hw_mutex);
-
+       } else if (gb_objects) {
+               ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+               if (unlikely(ret != 0))
+                       goto out_err;
        } else {
-
                fifo_mem = dev_priv->mmio_virt;
                memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
        }
@@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
        ret = copy_to_user(buffer, bounce, size);
        if (ret)
                ret = -EFAULT;
+out_err:
        vfree(bounce);
 
        if (unlikely(ret != 0))
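
vmw_fill_compat_cap() above sizes the record to whatever fits in the
bounce buffer the caller allocated. A stand-alone sketch of just the
sizing and clamping, with a stand-in for SVGA3D_DEVCAP_MAX:

#include <stddef.h>
#include <stdint.h>

#define DEVCAP_MAX 256	/* stand-in for SVGA3D_DEVCAP_MAX */

struct cap_pair { uint32_t index, value; };

struct compat_cap {
	uint32_t length;	/* record length in 32-bit units */
	uint32_t type;
	struct cap_pair pairs[DEVCAP_MAX];
};

static int compat_cap_npairs(size_t size, size_t *npairs)
{
	size_t pair_offset = offsetof(struct compat_cap, pairs);
	size_t max_size;

	if (size < pair_offset)
		return -1;	/* no room for even the record header */

	max_size = (size - pair_offset) / sizeof(struct cap_pair);
	if (max_size > DEVCAP_MAX)
		max_size = DEVCAP_MAX;

	*npairs = max_size;
	return 0;
}
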
index 4910e7b..d4a5a19 100644 (file)
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+               ret = -ENOMEM;
                goto out_no_fifo;
        }
 
index 6fdd82d..2aa4bc6 100644 (file)
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
        return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+       return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
 
 /**
  * vmw_resource_release_id - release a resource id to the id manager.
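
The new helper above wraps kref_get_unless_zero() so that a lookup can
never resurrect an object whose last reference is already gone. A
minimal kernel-style sketch of the calling pattern, with an
illustrative type:

#include <linux/kref.h>

struct obj {
	struct kref kref;
};

static struct obj *obj_lookup(struct obj *candidate)
{
	/* Caller holds the lock protecting the lookup structure. */
	if (candidate && kref_get_unless_zero(&candidate->kref))
		return candidate;

	return NULL;	/* object is mid-teardown; treat as absent */
}
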
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
                vmw_dmabuf_unreference(&res->backup);
        }
 
-       if (likely(res->hw_destroy != NULL))
+       if (likely(res->hw_destroy != NULL)) {
                res->hw_destroy(res);
+               mutex_lock(&dev_priv->binding_mutex);
+               vmw_context_binding_res_list_kill(&res->binding_head);
+               mutex_unlock(&dev_priv->binding_mutex);
+       }
 
        id = res->id;
        if (res->res_free != NULL)
index 1457ec4..217d941 100644 (file)
@@ -29,6 +29,8 @@
 #include "vmwgfx_resource_priv.h"
 #include "ttm/ttm_placement.h"
 
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
 struct vmw_shader {
        struct vmw_resource res;
        SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
        struct vmw_shader shader;
 };
 
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+       VMW_COMPAT_COMMITED,
+       VMW_COMPAT_ADD,
+       VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+       u32 handle;
+       struct ttm_object_file *tfile;
+       struct drm_hash_item hash;
+       struct list_head head;
+       enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+       struct drm_open_hash shaders;
+       struct list_head list;
+       struct vmw_private *dev_priv;
+};
+
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
                return 0;
 
        mutex_lock(&dev_priv->binding_mutex);
-       vmw_context_binding_res_list_kill(&res->binding_head);
+       vmw_context_binding_res_list_scrub(&res->binding_head);
 
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                                         TTM_REF_USAGE);
 }
 
+int vmw_shader_alloc(struct vmw_private *dev_priv,
+                    struct vmw_dma_buffer *buffer,
+                    size_t shader_size,
+                    size_t offset,
+                    SVGA3dShaderType shader_type,
+                    struct ttm_object_file *tfile,
+                    u32 *handle)
+{
+       struct vmw_user_shader *ushader;
+       struct vmw_resource *res, *tmp;
+       int ret;
+
+       /*
+        * Approximate idr memory usage with 128 bytes. It will be limited
+        * by the maximum number of shaders anyway.
+        */
+       if (unlikely(vmw_user_shader_size == 0))
+               vmw_user_shader_size =
+                       ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  vmw_user_shader_size,
+                                  false, true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for shader "
+                                 "creation.\n");
+               goto out;
+       }
+
+       ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+       if (unlikely(ushader == NULL)) {
+               ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                                   vmw_user_shader_size);
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       res = &ushader->shader.res;
+       ushader->base.shareable = false;
+       ushader->base.tfile = NULL;
+
+       /*
+        * From here on, the destructor takes over resource freeing.
+        */
+
+       ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+                                offset, shader_type, buffer,
+                                vmw_user_shader_free);
+       if (unlikely(ret != 0))
+               goto out;
+
+       tmp = vmw_resource_reference(res);
+       ret = ttm_base_object_init(tfile, &ushader->base, false,
+                                  VMW_RES_SHADER,
+                                  &vmw_user_shader_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               goto out_err;
+       }
+
+       if (handle)
+               *handle = ushader->base.hash.key;
+out_err:
+       vmw_resource_unreference(&res);
+out:
+       return ret;
+}
+
+
 int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_user_shader *ushader;
-       struct vmw_resource *res;
-       struct vmw_resource *tmp;
        struct drm_vmw_shader_create_arg *arg =
                (struct drm_vmw_shader_create_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                goto out_bad_arg;
        }
 
-       /*
-        * Approximate idr memory usage with 128 bytes. It will be limited
-        * by maximum number_of shaders anyway.
-        */
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               goto out_bad_arg;
 
-       if (unlikely(vmw_user_shader_size == 0))
-               vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
-                       + 128;
+       ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+                              shader_type, tfile, &arg->shader_handle);
 
-       ret = ttm_read_lock(&vmaster->lock, true);
+       ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+       vmw_dmabuf_unreference(&buffer);
+       return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, this should be a pointer to the user_key.
+ * On successful exit, it will contain the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+                            SVGA3dShaderType shader_type,
+                            u32 *user_key)
+{
+       struct drm_hash_item *hash;
+       int ret;
+       unsigned long key = *user_key | (shader_type << 24);
+
+       ret = drm_ht_find_item(&man->shaders, key, &hash);
        if (unlikely(ret != 0))
                return ret;
 
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  vmw_user_shader_size,
-                                  false, true);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for shader"
-                                 " creation.\n");
-               goto out_unlock;
+       *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+                                  hash)->handle;
+
+       return 0;
+}
+
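
Every compat-shader lookup, add and remove keys on the same packed
value, which is also why vmw_compat_shader_add() rejects user keys of
24 bits or more and shader types above 16. A small sketch of the
packing:

#include <stdint.h>

static inline unsigned long compat_shader_key(uint32_t user_key,
					      uint32_t shader_type)
{
	/* Caller validates user_key < (1 << 24) and shader_type <= 16. */
	return user_key | (shader_type << 24);
}
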
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+                                  struct vmw_compat_shader *entry)
+{
+       list_del(&entry->head);
+       WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+       WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+                                         TTM_REF_USAGE));
+       kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+                              struct list_head *list)
+{
+       struct vmw_compat_shader *entry, *next;
+
+       list_for_each_entry_safe(entry, next, list, head) {
+               list_del(&entry->head);
+               switch (entry->state) {
+               case VMW_COMPAT_ADD:
+                       entry->state = VMW_COMPAT_COMMITED;
+                       list_add_tail(&entry->head, &man->list);
+                       break;
+               case VMW_COMPAT_DEL:
+                       ttm_ref_object_base_unref(entry->tfile, entry->handle,
+                                                 TTM_REF_USAGE);
+                       kfree(entry);
+                       break;
+               default:
+                       BUG();
+                       break;
+               }
        }
+}
 
-       ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
-       if (unlikely(ushader == NULL)) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_user_shader_size);
-               ret = -ENOMEM;
-               goto out_unlock;
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+                              struct list_head *list)
+{
+       struct vmw_compat_shader *entry, *next;
+
+       list_for_each_entry_safe(entry, next, list, head) {
+               switch (entry->state) {
+               case VMW_COMPAT_ADD:
+                       vmw_compat_shader_free(man, entry);
+                       break;
+               case VMW_COMPAT_DEL:
+                       (void) drm_ht_insert_item(&man->shaders, &entry->hash);
+                       list_del(&entry->head);
+                       list_add_tail(&entry->head, &man->list);
+                       entry->state = VMW_COMPAT_COMMITED;
+                       break;
+               default:
+                       BUG();
+                       break;
+               }
        }
+}
 
-       res = &ushader->shader.res;
-       ushader->base.shareable = false;
-       ushader->base.tfile = NULL;
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (but the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously committed to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+                            u32 user_key, SVGA3dShaderType shader_type,
+                            struct list_head *list)
+{
+       struct vmw_compat_shader *entry;
+       struct drm_hash_item *hash;
+       int ret;
 
-       /*
-        * From here on, the destructor takes over resource freeing.
-        */
+       ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+                              &hash);
+       if (likely(ret != 0))
+               return -EINVAL;
 
-       ret = vmw_gb_shader_init(dev_priv, res, arg->size,
-                                arg->offset, shader_type, buffer,
-                                vmw_user_shader_free);
+       entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+       switch (entry->state) {
+       case VMW_COMPAT_ADD:
+               vmw_compat_shader_free(man, entry);
+               break;
+       case VMW_COMPAT_COMMITED:
+               (void) drm_ht_remove_item(&man->shaders, &entry->hash);
+               list_del(&entry->head);
+               entry->state = VMW_COMPAT_DEL;
+               list_add_tail(&entry->head, list);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       return 0;
+}
+
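
Additions and removals are staged on the caller's list and only folded
into the manager once the execbuf that produced them succeeds; on
failure they are undone. A reduced sketch of the state transitions,
using stand-in names for the three states:

enum stage_state { STAGED_ADD, STAGED_DEL, COMMITTED };

static enum stage_state resolve_action(enum stage_state s, int commit)
{
	if (commit)
		/* ADD becomes visible; DEL is finalized by freeing. */
		return (s == STAGED_ADD) ? COMMITTED : s;

	/* Revert: ADD is thrown away; DEL is re-inserted as COMMITTED. */
	return (s == STAGED_DEL) ? COMMITTED : s;
}
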
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+                         u32 user_key, const void *bytecode,
+                         SVGA3dShaderType shader_type,
+                         size_t size,
+                         struct ttm_object_file *tfile,
+                         struct list_head *list)
+{
+       struct vmw_dma_buffer *buf;
+       struct ttm_bo_kmap_obj map;
+       bool is_iomem;
+       struct vmw_compat_shader *compat;
+       u32 handle;
+       int ret;
+
+       if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+               return -EINVAL;
+
+       /* Allocate and pin a DMA buffer */
+       buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+       if (unlikely(buf == NULL))
+               return -ENOMEM;
+
+       ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+                             true, vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
-               goto out_unlock;
+               goto out;
 
-       tmp = vmw_resource_reference(res);
-       ret = ttm_base_object_init(tfile, &ushader->base, false,
-                                  VMW_RES_SHADER,
-                                  &vmw_user_shader_base_release, NULL);
+       ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+       if (unlikely(ret != 0))
+               goto no_reserve;
 
+       /* Map and copy shader bytecode. */
+       ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+                         &map);
        if (unlikely(ret != 0)) {
-               vmw_resource_unreference(&tmp);
-               goto out_err;
+               ttm_bo_unreserve(&buf->base);
+               goto no_reserve;
        }
 
-       arg->shader_handle = ushader->base.hash.key;
-out_err:
-       vmw_resource_unreference(&res);
-out_unlock:
-       ttm_read_unlock(&vmaster->lock);
-out_bad_arg:
-       vmw_dmabuf_unreference(&buffer);
+       memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+       WARN_ON(is_iomem);
+
+       ttm_bo_kunmap(&map);
+       ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+       WARN_ON(ret != 0);
+       ttm_bo_unreserve(&buf->base);
+
+       /* Create a guest-backed shader container backed by the dma buffer */
+       ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+                              tfile, &handle);
+       vmw_dmabuf_unreference(&buf);
+       if (unlikely(ret != 0))
+               goto no_reserve;
+       /*
+        * Create a compat shader structure and stage it for insertion
+        * in the manager.
+        */
+       compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+       if (compat == NULL) {
+               ret = -ENOMEM;
+               goto no_compat;
+       }
+
+       compat->hash.key = user_key | (shader_type << 24);
+       ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+       if (unlikely(ret != 0))
+               goto out_invalid_key;
+
+       compat->state = VMW_COMPAT_ADD;
+       compat->handle = handle;
+       compat->tfile = tfile;
+       list_add_tail(&compat->head, list);
 
+       return 0;
+
+out_invalid_key:
+       kfree(compat);
+no_compat:
+       ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
        return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+       struct vmw_compat_shader_manager *man;
+       int ret;
+
+       man = kzalloc(sizeof(*man), GFP_KERNEL);
+       if (man == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       man->dev_priv = dev_priv;
+       INIT_LIST_HEAD(&man->list);
+       ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+       if (ret == 0)
+               return man;
+
+       kfree(man);
+       return ERR_PTR(ret);
+}
+
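
The constructor above returns an ERR_PTR on failure rather than NULL. A
hypothetical caller at file open time, using the real IS_ERR()/PTR_ERR()
helpers:

#include <linux/err.h>

static int open_time_setup(struct vmw_private *dev_priv,
			   struct vmw_compat_shader_manager **out)
{
	struct vmw_compat_shader_manager *man;

	man = vmw_compat_shader_man_create(dev_priv);
	if (IS_ERR(man))
		return PTR_ERR(man);	/* e.g. -ENOMEM from drm_ht_create() */

	*out = man;
	return 0;
}
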
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+       struct vmw_compat_shader *entry, *next;
+
+       mutex_lock(&man->dev_priv->cmdbuf_mutex);
+       list_for_each_entry_safe(entry, next, &man->list, head)
+               vmw_compat_shader_free(man, entry);
 
+       mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+       kfree(man);
 }
index 979da1c..82468d9 100644 (file)
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
            rep->size_addr;
 
        if (user_sizes)
-               ret = copy_to_user(user_sizes, srf->sizes,
-                                  srf->num_sizes * sizeof(*srf->sizes));
+               ret = copy_to_user(user_sizes, &srf->base_size,
+                                  sizeof(srf->base_size));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
                return 0;
 
        mutex_lock(&dev_priv->binding_mutex);
-       vmw_context_binding_res_list_kill(&res->binding_head);
+       vmw_context_binding_res_list_scrub(&res->binding_head);
 
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
index 029ecab..73b3865 100644 (file)
@@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
        if (hwmon_irq < 0)
                return hwmon_irq;
 
-       hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
-       if (hwmon_irq < 0)
-               return hwmon_irq;
-
        ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
                                        NULL, da9055_auxadc_irq,
                                        IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
index 3cbf66e..291d11f 100644 (file)
@@ -90,7 +90,8 @@ struct pmbus_data {
 
        u32 flags;              /* from platform data */
 
-       int exponent;           /* linear mode: exponent for output voltages */
+       int exponent[PMBUS_PAGES];
+                               /* linear mode: exponent for output voltages */
 
        const struct pmbus_driver_info *info;
 
@@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
        long val;
 
        if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
-               exponent = data->exponent;
+               exponent = data->exponent[sensor->page];
                mantissa = (u16) sensor->data;
        } else {                                /* LINEAR11 */
                exponent = ((s16)sensor->data) >> 11;
@@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
 #define MIN_MANTISSA   (511 * 1000)
 
 static u16 pmbus_data2reg_linear(struct pmbus_data *data,
-                                enum pmbus_sensor_classes class, long val)
+                                struct pmbus_sensor *sensor, long val)
 {
        s16 exponent = 0, mantissa;
        bool negative = false;
@@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
        if (val == 0)
                return 0;
 
-       if (class == PSC_VOLTAGE_OUT) {
+       if (sensor->class == PSC_VOLTAGE_OUT) {
                /* LINEAR16 does not support negative voltages */
                if (val < 0)
                        return 0;
@@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
                 * For a static exponents, we don't have a choice
                 * but to adjust the value to it.
                 */
-               if (data->exponent < 0)
-                       val <<= -data->exponent;
+               if (data->exponent[sensor->page] < 0)
+                       val <<= -data->exponent[sensor->page];
                else
-                       val >>= data->exponent;
+                       val >>= data->exponent[sensor->page];
                val = DIV_ROUND_CLOSEST(val, 1000);
                return val & 0xffff;
        }
@@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
        }
 
        /* Power is in uW. Convert to mW before converting. */
-       if (class == PSC_POWER)
+       if (sensor->class == PSC_POWER)
                val = DIV_ROUND_CLOSEST(val, 1000L);
 
        /*
         * For simplicity, convert fan data to milli-units
         * before calculating the exponent.
         */
-       if (class == PSC_FAN)
+       if (sensor->class == PSC_FAN)
                val = val * 1000;
 
        /* Reduce large mantissa until it fits into 10 bit */
@@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
 }
 
 static u16 pmbus_data2reg_direct(struct pmbus_data *data,
-                                enum pmbus_sensor_classes class, long val)
+                                struct pmbus_sensor *sensor, long val)
 {
        long m, b, R;
 
-       m = data->info->m[class];
-       b = data->info->b[class];
-       R = data->info->R[class];
+       m = data->info->m[sensor->class];
+       b = data->info->b[sensor->class];
+       R = data->info->R[sensor->class];
 
        /* Power is in uW. Adjust R and b. */
-       if (class == PSC_POWER) {
+       if (sensor->class == PSC_POWER) {
                R -= 3;
                b *= 1000;
        }
 
        /* Calculate Y = (m * X + b) * 10^R */
-       if (class != PSC_FAN) {
+       if (sensor->class != PSC_FAN) {
                R -= 3;         /* Adjust R and b for data in milli-units */
                b *= 1000;
        }
@@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
 }
 
 static u16 pmbus_data2reg_vid(struct pmbus_data *data,
-                             enum pmbus_sensor_classes class, long val)
+                             struct pmbus_sensor *sensor, long val)
 {
        val = clamp_val(val, 500, 1600);
 
@@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
 }
 
 static u16 pmbus_data2reg(struct pmbus_data *data,
-                         enum pmbus_sensor_classes class, long val)
+                         struct pmbus_sensor *sensor, long val)
 {
        u16 regval;
 
-       switch (data->info->format[class]) {
+       switch (data->info->format[sensor->class]) {
        case direct:
-               regval = pmbus_data2reg_direct(data, class, val);
+               regval = pmbus_data2reg_direct(data, sensor, val);
                break;
        case vid:
-               regval = pmbus_data2reg_vid(data, class, val);
+               regval = pmbus_data2reg_vid(data, sensor, val);
                break;
        case linear:
        default:
-               regval = pmbus_data2reg_linear(data, class, val);
+               regval = pmbus_data2reg_linear(data, sensor, val);
                break;
        }
        return regval;
@@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
                return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       regval = pmbus_data2reg(data, sensor->class, val);
+       regval = pmbus_data2reg(data, sensor, val);
        ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
        if (ret < 0)
                rv = ret;
@@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client,
  * This function is called for all chips.
  */
 static int pmbus_identify_common(struct i2c_client *client,
-                                struct pmbus_data *data)
+                                struct pmbus_data *data, int page)
 {
        int vout_mode = -1;
 
-       if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
-               vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+       if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
+               vout_mode = _pmbus_read_byte_data(client, page,
+                                                 PMBUS_VOUT_MODE);
        if (vout_mode >= 0 && vout_mode != 0xff) {
                /*
                 * Not all chips support the VOUT_MODE command,
@@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client,
                        if (data->info->format[PSC_VOLTAGE_OUT] != linear)
                                return -ENODEV;
 
-                       data->exponent = ((s8)(vout_mode << 3)) >> 3;
+                       data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
                        break;
                case 1: /* VID mode         */
                        if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client,
                }
        }
 
-       pmbus_clear_fault_page(client, 0);
+       pmbus_clear_fault_page(client, page);
        return 0;
 }
 
@@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
                             struct pmbus_driver_info *info)
 {
        struct device *dev = &client->dev;
-       int ret;
+       int page, ret;
 
        /*
         * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
                return -ENODEV;
        }
 
-       ret = pmbus_identify_common(client, data);
-       if (ret < 0) {
-               dev_err(dev, "Failed to identify chip capabilities\n");
-               return ret;
+       for (page = 0; page < info->pages; page++) {
+               ret = pmbus_identify_common(client, data, page);
+               if (ret < 0) {
+                       dev_err(dev, "Failed to identify chip capabilities\n");
+                       return ret;
+               }
        }
        return 0;
 }
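
With VOUT_MODE now read per page, every page carries its own LINEAR16
exponent. A plain-C sketch of the sign extension done above and of the
conversion the exponent feeds (an illustration of LINEAR16, not the
driver's exact helper):

#include <stdint.h>

static int vout_mode_exponent(uint8_t vout_mode)
{
	/* Keep the low 5 bits of VOUT_MODE and sign-extend them. */
	return ((int8_t)(vout_mode << 3)) >> 3;
}

static long linear16_to_millivolts(uint16_t mantissa, int exponent)
{
	long val = (long)mantissa * 1000;	/* scale to milli-units */

	return exponent >= 0 ? val << exponent : val >> -exponent;
}
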
index 86b484c..5194afb 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_SIRF_IRQ)                        += irq-sirfsoc.o
 obj-$(CONFIG_RENESAS_INTC_IRQPIN)      += irq-renesas-intc-irqpin.o
 obj-$(CONFIG_RENESAS_IRQC)             += irq-renesas-irqc.o
 obj-$(CONFIG_VERSATILE_FPGA_IRQ)       += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE)              += irq-zevio.o
 obj-$(CONFIG_ARCH_VT8500)              += irq-vt8500.o
 obj-$(CONFIG_TB10X_IRQC)               += irq-tb10x.o
 obj-$(CONFIG_XTENSA)                   += irq-xtensa-pic.o
index 9300bc3..5409564 100644 (file)
@@ -381,7 +381,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
                                                ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
                                & PCI_MSI_DOORBELL_MASK;
 
-                       writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+                       writel(~msimask, per_cpu_int_base +
                               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
 
                        for (msinr = PCI_MSI_DOORBELL_START;
@@ -407,7 +407,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
                                                ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
                                & IPI_DOORBELL_MASK;
 
-                       writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
+                       writel(~ipimask, per_cpu_int_base +
                                ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
 
                        /* Handle all pending doorbells */
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644 (file)
index 0000000..8ed04c4
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ *  linux/drivers/irqchip/irq-zevio.c
+ *
+ *  Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define IO_STATUS      0x000
+#define IO_RAW_STATUS  0x004
+#define IO_ENABLE      0x008
+#define IO_DISABLE     0x00C
+#define IO_CURRENT     0x020
+#define IO_RESET       0x028
+#define IO_MAX_PRIOTY  0x02C
+
+#define IO_IRQ_BASE    0x000
+#define IO_FIQ_BASE    0x100
+
+#define IO_INVERT_SEL  0x200
+#define IO_STICKY_SEL  0x204
+#define IO_PRIORITY_SEL        0x300
+
+#define MAX_INTRS      32
+#define FIQ_START      MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+       struct irq_chip_regs *regs =
+               &container_of(irqd->chip, struct irq_chip_type, chip)->regs;
+
+       readl(gc->reg_base + regs->ack);
+}
+
+static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+       int irqnr;
+
+       while (readl(zevio_irq_io + IO_STATUS)) {
+               irqnr = readl(zevio_irq_io + IO_CURRENT);
+               irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
+               handle_IRQ(irqnr, regs);
+       }
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+       /* Disable all interrupts */
+       writel(~0, base + IO_DISABLE);
+
+       /* Accept interrupts of all priorities */
+       writel(0xF, base + IO_MAX_PRIOTY);
+
+       /* Reset existing interrupts */
+       readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+                               struct device_node *parent)
+{
+       unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+       struct irq_chip_generic *gc;
+       int ret;
+
+       if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+               return -EBUSY;
+
+       zevio_irq_io = of_iomap(node, 0);
+       BUG_ON(!zevio_irq_io);
+
+       /* Do not invert interrupt status bits */
+       writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+       /* Disable sticky interrupts */
+       writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+       /* We don't use IRQ priorities. Set each IRQ to highest priority. */
+       memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+       /* Init IRQ and FIQ */
+       zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+       zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+       zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+                                                &irq_generic_chip_ops, NULL);
+       BUG_ON(!zevio_irq_domain);
+
+       ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+                                            "zevio_intc", handle_level_irq,
+                                            clr, 0, IRQ_GC_INIT_MASK_CACHE);
+       BUG_ON(ret);
+
+       gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+       gc->reg_base                            = zevio_irq_io;
+       gc->chip_types[0].chip.irq_ack          = zevio_irq_ack;
+       gc->chip_types[0].chip.irq_mask         = irq_gc_mask_disable_reg;
+       gc->chip_types[0].chip.irq_unmask       = irq_gc_unmask_enable_reg;
+       gc->chip_types[0].regs.mask             = IO_IRQ_BASE + IO_ENABLE;
+       gc->chip_types[0].regs.enable           = IO_IRQ_BASE + IO_ENABLE;
+       gc->chip_types[0].regs.disable          = IO_IRQ_BASE + IO_DISABLE;
+       gc->chip_types[0].regs.ack              = IO_IRQ_BASE + IO_RESET;
+
+       set_handle_irq(zevio_handle_irq);
+
+       pr_info("TI-NSPIRE classic IRQ controller\n");
+       return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
index 68f768a..a6c3c9e 100644 (file)
@@ -1176,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
 
        switch (demod) {
        case 0:
-               dev_err(&state->priv->i2c->dev,
+               dev_err(&i2c->dev,
                        "%s: Error attaching frontend %d\n",
                        KBUILD_MODNAME, demod);
                goto error1;
@@ -1200,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
        state->demod = demod - 1;
        state->priv = priv;
 
-       /* test i2c bus for ack */
-       if (demod == 0) {
-               if (cx24117_readreg(state, 0x00) < 0)
-                       goto error3;
-       }
-
        dev_info(&state->priv->i2c->dev,
                "%s: Attaching frontend %d\n",
                KBUILD_MODNAME, state->demod);
@@ -1216,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
        state->frontend.demodulator_priv = state;
        return &state->frontend;
 
-error3:
-       kfree(state);
 error2:
        cx24117_release_priv(priv);
 error1:
index 4bf0575..8a8e1ec 100644 (file)
@@ -2,7 +2,7 @@
  *    Support for NXT2002 and NXT2004 - VSB/QAM
  *
  *    Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
- *    Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net>
+ *    Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
  *    based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
  *    and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
  *
index 1effc21..9bbd665 100644 (file)
@@ -2554,7 +2554,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd)
        sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
                                         (pdata->sdp_free_run_cbar_en << 1) |
                                         (pdata->sdp_free_run_man_col_en << 2) |
-                                        (pdata->sdp_free_run_force << 3));
+                                        (pdata->sdp_free_run_auto << 3));
 
        /* TODO from platform data */
        cp_write(sd, 0x69, 0x14);   /* Enable CP CSC */
index 4b83811..77e10e0 100644 (file)
@@ -478,25 +478,33 @@ static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
                                  u16 count, const u16 *seq)
 {
        struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
-       __be16 buf[count + 1];
-       int ret, n;
+       __be16 buf[65];
 
        s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
        if (state->error)
                return;
 
+       v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
+                min(2 * count, 64), seq);
+
        buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
-       for (n = 1; n <= count; ++n)
-               buf[n] = cpu_to_be16(*seq++);
 
-       n *= 2;
-       ret = i2c_master_send(c, (char *)buf, n);
-       v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
-                min(2 * count, 64), seq - count);
+       while (count > 0) {
+               int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
+               int ret, i;
 
-       if (ret != n) {
-               v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
-               state->error = ret;
+               for (i = 1; i <= n; ++i)
+                       buf[i] = cpu_to_be16(*seq++);
+
+               i *= 2;
+               ret = i2c_master_send(c, (char *)buf, i);
+               if (ret != i) {
+                       v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
+                       state->error = ret;
+                       break;
+               }
+
+               count -= n;
        }
 }
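
The rewrite above replaces a variable-length array with a fixed 65-word
bounce buffer and loops over the sequence in chunks. A stand-alone
sketch of the same chunking, with a stub in place of the bus write and
an illustrative command-buffer register value:

#include <stddef.h>
#include <stdint.h>

#define CHUNK_WORDS 64

/* Stub standing in for the bus write (i2c_master_send in the driver). */
static int send_words(const uint16_t *buf, size_t n)
{
	(void)buf;
	(void)n;
	return 0;
}

static int write_seq_chunked(const uint16_t *seq, size_t count)
{
	uint16_t buf[CHUNK_WORDS + 1];	/* +1 for the register word */

	while (count > 0) {
		size_t n = count < CHUNK_WORDS ? count : CHUNK_WORDS;
		size_t i;

		buf[0] = 0x0F12;	/* illustrative REG_CMD_BUF value */
		for (i = 0; i < n; ++i)
			buf[i + 1] = seq[i];

		if (send_words(buf, n + 1) != 0)
			return -1;

		seq += n;
		count -= n;
	}

	return 0;
}
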
 
index d85cb0a..6662b49 100644 (file)
@@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = {
        },
                /* ---- card 0x87---------------------------------- */
        [BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
-               /* Michael Krufky <mkrufky@m1k.net> */
+               /* Michael Krufky <mkrufky@linuxtv.org> */
                .name           = "DViCO FusionHDTV 5 Lite",
                .tuner_type     = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
                .tuner_addr     = ADDR_UNSET,
index 922e823..3f364b7 100644 (file)
@@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
 
        err = device_register(&sub->dev);
        if (0 != err) {
-               kfree(sub);
+               put_device(&sub->dev);
                return err;
        }
        pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
index d45e7f6..c9b2350 100644 (file)
@@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = {
                }},
        },
        [SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
-               /* Michael Krufky <mkrufky@m1k.net>
+               /* Michael Krufky <mkrufky@linuxtv.org>
                 * Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
                 * AFAIK, there is no analog demod, thus,
                 * no support for analog television.
index a7dfd07..da2fc86 100644 (file)
@@ -1027,7 +1027,8 @@ static int fimc_probe(struct platform_device *pdev)
        return 0;
 
 err_gclk:
-       clk_disable(fimc->clock[CLK_GATE]);
+       if (!pm_runtime_enabled(dev))
+               clk_disable(fimc->clock[CLK_GATE]);
 err_sd:
        fimc_unregister_capture_subdev(fimc);
 err_sclk:
@@ -1036,6 +1037,7 @@ err_sclk:
        return ret;
 }
 
+#ifdef CONFIG_PM_RUNTIME
 static int fimc_runtime_resume(struct device *dev)
 {
        struct fimc_dev *fimc = dev_get_drvdata(dev);
@@ -1068,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev)
        dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
        return ret;
 }
+#endif
 
 #ifdef CONFIG_PM_SLEEP
 static int fimc_resume(struct device *dev)
index 1234734..779ec3c 100644 (file)
@@ -1563,7 +1563,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
        if (!pm_runtime_enabled(dev)) {
                ret = clk_enable(fimc->clock);
                if (ret < 0)
-                       goto err_clk_put;
+                       goto err_sd;
        }
 
        fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
@@ -1579,7 +1579,8 @@ static int fimc_lite_probe(struct platform_device *pdev)
        return 0;
 
 err_clk_dis:
-       clk_disable(fimc->clock);
+       if (!pm_runtime_enabled(dev))
+               clk_disable(fimc->clock);
 err_sd:
        fimc_lite_unregister_capture_subdev(fimc);
 err_clk_put:
@@ -1587,6 +1588,7 @@ err_clk_put:
        return ret;
 }
 
+#ifdef CONFIG_PM_RUNTIME
 static int fimc_lite_runtime_resume(struct device *dev)
 {
        struct fimc_lite *fimc = dev_get_drvdata(dev);
@@ -1602,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
        clk_disable(fimc->clock);
        return 0;
 }
+#endif
 
 #ifdef CONFIG_PM_SLEEP
 static int fimc_lite_resume(struct device *dev)
index a1c78c8..7d68d0b 100644 (file)
@@ -175,7 +175,7 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
        {
                .name           = "YUV 4:2:0 planar, Y/CbCr",
                .fourcc         = V4L2_PIX_FMT_NV12,
-               .depth          = 16,
+               .depth          = 12,
                .colplanes      = 2,
                .h_align        = 1,
                .v_align        = 1,
@@ -188,10 +188,10 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
        {
                .name           = "YUV 4:2:0 planar, Y/CbCr",
                .fourcc         = V4L2_PIX_FMT_NV12,
-               .depth          = 16,
-               .colplanes      = 4,
+               .depth          = 12,
+               .colplanes      = 2,
                .h_align        = 4,
-               .v_align        = 1,
+               .v_align        = 4,
                .flags          = SJPEG_FMT_FLAG_ENC_OUTPUT |
                                  SJPEG_FMT_FLAG_DEC_CAPTURE |
                                  SJPEG_FMT_FLAG_S5P |
index 8f9b2ce..8ede8ea 100644 (file)
@@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = {
                &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
        { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
                &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
+       { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
+               &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, af9035_id_table);
index d83df4b..0a98d04 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -601,7 +601,7 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
 EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
 
 MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("0.1");
 
index 3f3f8bf..2d4530f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index e4121cb..a619410 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 0220f54..b85a577 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 3443455..a101d06 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index a57a45f..4657621 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index b741b3a..f6b3480 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-phy.c - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index f075607..0643738 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-phy.h - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 17831b0..89bf115 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-reg.h - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 879c529..a8d2c70 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -512,7 +512,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
 EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
 
 MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("0.1");
 
index 90f583e..2046db2 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
 #else
 static inline
 struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
-                                          struct mxl111sf_state *mxl_state
+                                          struct mxl111sf_state *mxl_state,
                                           struct mxl111sf_tuner_config *cfg)
 {
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
index 08240e4..c7304fa 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
  *
  *   This program is free software; you can redistribute it and/or modify it
  *   under the terms of the GNU General Public License as published by the Free
@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
                ret = -EINVAL;
        }
 
-       pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
+       pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
 fail:
        return ret;
 }
@@ -1421,7 +1421,7 @@ static struct usb_driver mxl111sf_usb_driver = {
 
 module_usb_driver(mxl111sf_usb_driver);
 
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
 MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 9816de8..8516c01 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
  *
  *   This program is free software; you can redistribute it and/or modify it
  *   under the terms of the GNU General Public License as published by the Free
index 2f0c89c..c563896 100644 (file)
@@ -198,7 +198,6 @@ static int device_authorization(struct hdpvr_device *dev)
        hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
        v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
                 print_buf);
-       kfree(print_buf);
 #endif
 
        msleep(100);
@@ -214,6 +213,9 @@ static int device_authorization(struct hdpvr_device *dev)
        retval = ret != 8;
 unlock:
        mutex_unlock(&dev->usbc_mutex);
+#ifdef HDPVR_DEBUG
+       kfree(print_buf);
+#endif
        return retval;
 }
 
index ee52b9f..f7902fe 100644 (file)
@@ -515,6 +515,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
                aspect.denominator = 9;
        }
        image_width = ((image_height * aspect.numerator) / aspect.denominator);
+       image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
 
        /* Horizontal */
        if (default_gtf)
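
The added line snaps the computed width to the GTF character-cell granularity (GTF_CELL_GRAN, 8 in this code): masking with ~(GTF_CELL_GRAN - 1) truncates to a multiple of 8, and adding GTF_CELL_GRAN/2 first turns that truncation into round-to-nearest. A standalone check of the identity:

    #include <stdio.h>

    #define GTF_CELL_GRAN 8   /* character-cell granularity used by GTF */

    int main(void)
    {
        /* (w + 4) & ~7 rounds to the nearest multiple of 8:
         * 1363 -> 1360, 1364 -> 1368, 1368 -> 1368. */
        unsigned w;
        for (w = 1360; w <= 1368; w += 4)
            printf("%u -> %u\n", w,
                   (w + GTF_CELL_GRAN / 2) & ~(GTF_CELL_GRAN - 1));
        return 0;
    }
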
index 65411ad..7e6b209 100644 (file)
@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
 static void videobuf_vm_open(struct vm_area_struct *vma)
 {
        struct videobuf_mapping *map = vma->vm_private_data;
-       struct videobuf_queue *q = map->q;
 
-       dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+       dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
                map, map->count, vma->vm_start, vma->vm_end);
 
-       videobuf_queue_lock(q);
        map->count++;
-       videobuf_queue_unlock(q);
 }
 
 static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
        dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
                map, map->count, vma->vm_start, vma->vm_end);
 
-       videobuf_queue_lock(q);
-       if (!--map->count) {
+       map->count--;
+       if (0 == map->count) {
                struct videobuf_dma_contig_memory *mem;
 
                dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+               videobuf_queue_lock(q);
 
                /* We need first to cancel streams, before unmapping */
                if (q->streaming)
@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
 
                kfree(map);
 
+               videobuf_queue_unlock(q);
        }
-       videobuf_queue_unlock(q);
 }
 
 static const struct vm_operations_struct videobuf_vm_ops = {
index 9db674c..828e7c1 100644 (file)
@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
 static void videobuf_vm_open(struct vm_area_struct *vma)
 {
        struct videobuf_mapping *map = vma->vm_private_data;
-       struct videobuf_queue *q = map->q;
 
        dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
                map->count, vma->vm_start, vma->vm_end);
 
-       videobuf_queue_lock(q);
        map->count++;
-       videobuf_queue_unlock(q);
 }
 
 static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
        dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
                map->count, vma->vm_start, vma->vm_end);
 
-       videobuf_queue_lock(q);
-       if (!--map->count) {
+       map->count--;
+       if (0 == map->count) {
                dprintk(1, "munmap %p q=%p\n", map, q);
+               videobuf_queue_lock(q);
                for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                        if (NULL == q->bufs[i])
                                continue;
@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
                        q->bufs[i]->baddr = 0;
                        q->ops->buf_release(q, q->bufs[i]);
                }
+               videobuf_queue_unlock(q);
                kfree(map);
        }
-       videobuf_queue_unlock(q);
        return;
 }
 
index 1365c65..2ff7fcc 100644 (file)
@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
 static void videobuf_vm_open(struct vm_area_struct *vma)
 {
        struct videobuf_mapping *map = vma->vm_private_data;
-       struct videobuf_queue *q = map->q;
 
        dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
                map->count, vma->vm_start, vma->vm_end);
 
-       videobuf_queue_lock(q);
        map->count++;
-       videobuf_queue_unlock(q);
 }
 
 static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
        dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
                map->count, vma->vm_start, vma->vm_end);
 
-       videobuf_queue_lock(q);
-       if (!--map->count) {
+       map->count--;
+       if (0 == map->count) {
                struct videobuf_vmalloc_memory *mem;
 
                dprintk(1, "munmap %p q=%p\n", map, q);
+               videobuf_queue_lock(q);
 
                /* We need first to cancel streams, before unmapping */
                if (q->streaming)
@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
 
                kfree(map);
 
+               videobuf_queue_unlock(q);
        }
-       videobuf_queue_unlock(q);
 
        return;
 }
index 5a5fb7f..a127925 100644 (file)
@@ -1776,6 +1776,11 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
                return 0;
        }
 
+       if (!q->num_buffers) {
+               dprintk(1, "streamon: no buffers have been allocated\n");
+               return -EINVAL;
+       }
+
        /*
         * If any buffers were queued before streamon,
         * we can now pass them to driver for processing.
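
The new guard makes VIDIOC_STREAMON fail with EINVAL while the queue still has zero buffers, enforcing the usual REQBUFS-before-STREAMON ordering at the ioctl boundary. A minimal sketch of that ordering against a vb2-backed capture node (open(), mmap and queueing of the buffers elided):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* VIDIOC_REQBUFS must succeed (q->num_buffers > 0) before
     * VIDIOC_STREAMON is attempted, or streamon now returns EINVAL. */
    int start_capture(int fd)
    {
        struct v4l2_requestbuffers req;
        int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

        memset(&req, 0, sizeof(req));
        req.count  = 4;
        req.type   = type;
        req.memory = V4L2_MEMORY_MMAP;
        if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
            return -1;
        /* ... mmap and VIDIOC_QBUF each buffer here ... */
        return ioctl(fd, VIDIOC_STREAMON, &type);
    }
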
index cd929ae..e2a783f 100644 (file)
@@ -210,10 +210,29 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
        }
 }
 
+static void dock_event(acpi_handle handle, u32 type, void *data)
+{
+       struct acpiphp_context *context;
+
+       mutex_lock(&acpiphp_context_lock);
+       context = acpiphp_get_context(handle);
+       if (!context || WARN_ON(context->handle != handle)
+           || context->func.parent->is_going_away) {
+               mutex_unlock(&acpiphp_context_lock);
+               return;
+       }
+       get_bridge(context->func.parent);
+       acpiphp_put_context(context);
+       mutex_unlock(&acpiphp_context_lock);
+
+       hotplug_event(handle, type, data);
+
+       put_bridge(context->func.parent);
+}
 
 static const struct acpi_dock_ops acpiphp_dock_ops = {
        .fixup = post_dock_fixups,
-       .handler = hotplug_event,
+       .handler = dock_event,
 };
 
 /* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -441,7 +460,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
        list_del(&bridge->list);
        mutex_unlock(&bridge_mutex);
 
+       mutex_lock(&acpiphp_context_lock);
        bridge->is_going_away = true;
+       mutex_unlock(&acpiphp_context_lock);
 }
 
 /**
@@ -742,7 +763,7 @@ static void trim_stale_devices(struct pci_dev *dev)
 
                /* The device is a bridge. so check the bus below it. */
                pm_runtime_get_sync(&dev->dev);
-               list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+               list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
                        trim_stale_devices(child);
 
                pm_runtime_put(&dev->dev);
@@ -773,8 +794,8 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                        ; /* do nothing */
                } else if (get_slot_status(slot) == ACPI_STA_ALL) {
                        /* remove stale devices if any */
-                       list_for_each_entry_safe(dev, tmp, &bus->devices,
-                                                bus_list)
+                       list_for_each_entry_safe_reverse(dev, tmp,
+                                                        &bus->devices, bus_list)
                                if (PCI_SLOT(dev->devfn) == slot->device)
                                        trim_stale_devices(dev);
 
@@ -805,7 +826,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
        int i;
        unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
 
-       list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+       list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
                for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
                        struct resource *res = &dev->resource[i];
                        if ((res->flags & type_mask) && !res->start &&
@@ -829,7 +850,11 @@ void acpiphp_check_host_bridge(acpi_handle handle)
 
        bridge = acpiphp_handle_to_bridge(handle);
        if (bridge) {
+               pci_lock_rescan_remove();
+
                acpiphp_check_bridge(bridge);
+
+               pci_unlock_rescan_remove();
                put_bridge(bridge);
        }
 }
@@ -852,6 +877,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
 
        mutex_unlock(&acpiphp_context_lock);
 
+       pci_lock_rescan_remove();
        acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
        switch (type) {
@@ -905,6 +931,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
                break;
        }
 
+       pci_unlock_rescan_remove();
        if (bridge)
                put_bridge(bridge);
 }
@@ -915,11 +942,9 @@ static void hotplug_event_work(void *data, u32 type)
        acpi_handle handle = context->handle;
 
        acpi_scan_lock_acquire();
-       pci_lock_rescan_remove();
 
        hotplug_event(handle, type, context);
 
-       pci_unlock_rescan_remove();
        acpi_scan_lock_release();
        acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
        put_bridge(context->func.parent);
@@ -937,6 +962,7 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
 {
        struct acpiphp_context *context;
        u32 ost_code = ACPI_OST_SC_SUCCESS;
+       acpi_status status;
 
        switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
@@ -972,13 +998,20 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
 
        mutex_lock(&acpiphp_context_lock);
        context = acpiphp_get_context(handle);
-       if (context && !WARN_ON(context->handle != handle)) {
-               get_bridge(context->func.parent);
-               acpiphp_put_context(context);
-               acpi_hotplug_execute(hotplug_event_work, context, type);
+       if (!context || WARN_ON(context->handle != handle)
+           || context->func.parent->is_going_away)
+               goto err_out;
+
+       get_bridge(context->func.parent);
+       acpiphp_put_context(context);
+       status = acpi_hotplug_execute(hotplug_event_work, context, type);
+       if (ACPI_SUCCESS(status)) {
                mutex_unlock(&acpiphp_context_lock);
                return;
        }
+       put_bridge(context->func.parent);
+
+ err_out:
        mutex_unlock(&acpiphp_context_lock);
        ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
 
index 77b46d0..e10febe 100644 (file)
@@ -498,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
                                     struct ab3100_platform_data *plfdata,
                                     struct regulator_init_data *init_data,
                                     struct device_node *np,
-                                    int id)
+                                    unsigned long id)
 {
        struct regulator_desc *desc;
        struct ab3100_regulator *reg;
@@ -646,7 +646,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
                err = ab3100_regulator_register(
                        pdev, NULL, ab3100_regulator_matches[i].init_data,
                        ab3100_regulator_matches[i].of_node,
-                       (int) ab3100_regulator_matches[i].driver_data);
+                       (unsigned long)ab3100_regulator_matches[i].driver_data);
                if (err) {
                        ab3100_regulators_remove(pdev);
                        return err;
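
The int-to-unsigned-long switch fixes a pointer-width mismatch: the of-match driver_data is carried as a pointer, and narrowing it through int truncates (and warns) on 64-bit builds, whereas unsigned long is pointer-sized on the kernel's ABIs. A compact illustration:

    #include <stdio.h>

    int main(void)
    {
        void *driver_data = (void *)0x100000002UL;  /* pointer-sized cookie */

        /* (int)driver_data would truncate to 2 on LP64 and the compiler
         * warns about the cast; unsigned long round-trips intact. */
        unsigned long id = (unsigned long)driver_data;

        printf("0x%lx\n", id);
        return 0;
    }
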
index b38a6b6..16a309e 100644 (file)
@@ -1272,6 +1272,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
                                if (r->dev.parent &&
                                        node == r->dev.of_node)
                                        return r;
+                       *ret = -EPROBE_DEFER;
+                       return NULL;
                } else {
                        /*
                         * If we couldn't even get the node then it's
@@ -1312,7 +1314,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
        struct regulator_dev *rdev;
        struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
        const char *devname = NULL;
-       int ret = -EPROBE_DEFER;
+       int ret;
 
        if (id == NULL) {
                pr_err("get() with no identifier\n");
@@ -1322,6 +1324,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
        if (dev)
                devname = dev_name(dev);
 
+       if (have_full_constraints())
+               ret = -ENODEV;
+       else
+               ret = -EPROBE_DEFER;
+
        mutex_lock(&regulator_list_mutex);
 
        rdev = regulator_dev_lookup(dev, id, &ret);
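
Taken together these hunks change what a failed lookup reports: a matching DT node whose regulator has not registered yet now returns -EPROBE_DEFER immediately, and when the system declares full constraints the fallback error becomes -ENODEV instead of deferring forever. A condensed userspace model of the new decision, with hypothetical flags:

    #include <stdio.h>

    #define ENODEV        19
    #define EPROBE_DEFER 517   /* kernel-internal "try again later" */

    static int lookup_err(int have_node, int registered, int full_constraints)
    {
        if (have_node)
            return registered ? 0 : -EPROBE_DEFER;  /* new early return */
        return full_constraints ? -ENODEV : -EPROBE_DEFER;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               lookup_err(1, 0, 0),   /* node, not registered: -517 */
               lookup_err(0, 0, 1),   /* no node, full constraints: -19 */
               lookup_err(1, 1, 0));  /* registered: 0 */
        return 0;
    }
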
index d9e5579..cd0b9e3 100644 (file)
@@ -441,6 +441,7 @@ common_reg:
        for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
                if (!reg_np) {
                        config.init_data = pdata->regulators[i].initdata;
+                       config.of_node = pdata->regulators[i].reg_node;
                } else {
                        config.init_data = rdata[i].init_data;
                        config.of_node = rdata[i].of_node;
index 10bb41c..eecb1f2 100644 (file)
@@ -59,7 +59,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
 
        if (usbdev->descriptor.bNumConfigurations != 1) {
                dev_err(&interface->dev, "can't handle multiple config\n");
-               return -ENODEV;
+               goto failed2;
        }
 
        vendor = le16_to_cpu(usbdev->descriptor.idVendor);
@@ -108,6 +108,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
        return 0;
 
 failed2:
+       usb_put_dev(usbdev);
        dev_err(&interface->dev, "probe failed\n");
        return -ENODEV;
 }
@@ -115,6 +116,7 @@ failed2:
 static void go7007_loader_disconnect(struct usb_interface *interface)
 {
        dev_info(&interface->dev, "disconnect\n");
+       usb_put_dev(interface_to_usbdev(interface));
        usb_set_intfdata(interface, NULL);
 }
 
index 34a2704..073b4a1 100644 (file)
@@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map)
        }
 
        pr_debug("map %d+%d\n", map->index, map->count);
-       err = gnttab_map_refs_userspace(map->map_ops,
-                                       use_ptemod ? map->kmap_ops : NULL,
-                                       map->pages,
-                                       map->count);
+       err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+                       map->pages, map->count);
        if (err)
                return err;
 
@@ -317,10 +315,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
                }
        }
 
-       err = gnttab_unmap_refs_userspace(map->unmap_ops + offset,
-                                         use_ptemod ? map->kmap_ops + offset : NULL,
-                                         map->pages + offset,
-                                         pages);
+       err = gnttab_unmap_refs(map->unmap_ops + offset,
+                       use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
+                       pages);
        if (err)
                return err;
 
index 8ee13e2..b84e3ab 100644 (file)
@@ -928,17 +928,15 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
 }
 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
 
-int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
-                   struct page **pages, unsigned int count,
-                   bool m2p_override)
+                   struct page **pages, unsigned int count)
 {
        int i, ret;
        bool lazy = false;
        pte_t *pte;
-       unsigned long mfn, pfn;
+       unsigned long mfn;
 
-       BUG_ON(kmap_ops && !m2p_override);
        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
        if (ret)
                return ret;
@@ -957,12 +955,10 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                        set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
                                        map_ops[i].dev_bus_addr >> PAGE_SHIFT);
                }
-               return 0;
+               return ret;
        }
 
-       if (m2p_override &&
-           !in_interrupt() &&
-           paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+       if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
                arch_enter_lazy_mmu_mode();
                lazy = true;
        }
@@ -979,20 +975,8 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                } else {
                        mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
                }
-               pfn = page_to_pfn(pages[i]);
-
-               WARN_ON(PagePrivate(pages[i]));
-               SetPagePrivate(pages[i]);
-               set_page_private(pages[i], mfn);
-
-               pages[i]->index = pfn_to_mfn(pfn);
-               if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               if (m2p_override)
-                       ret = m2p_add_override(mfn, pages[i], kmap_ops ?
-                                              &kmap_ops[i] : NULL);
+               ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+                                      &kmap_ops[i] : NULL);
                if (ret)
                        goto out;
        }
@@ -1003,32 +987,15 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 
        return ret;
 }
-
-int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
-                   struct page **pages, unsigned int count)
-{
-       return __gnttab_map_refs(map_ops, NULL, pages, count, false);
-}
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
 
-int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
-                             struct gnttab_map_grant_ref *kmap_ops,
-                             struct page **pages, unsigned int count)
-{
-       return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
-}
-EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
-
-int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_map_grant_ref *kmap_ops,
-                     struct page **pages, unsigned int count,
-                     bool m2p_override)
+                     struct page **pages, unsigned int count)
 {
        int i, ret;
        bool lazy = false;
-       unsigned long pfn, mfn;
 
-       BUG_ON(kmap_ops && !m2p_override);
        ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
        if (ret)
                return ret;
@@ -1039,33 +1006,17 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                        set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
                                        INVALID_P2M_ENTRY);
                }
-               return 0;
+               return ret;
        }
 
-       if (m2p_override &&
-           !in_interrupt() &&
-           paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+       if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
                arch_enter_lazy_mmu_mode();
                lazy = true;
        }
 
        for (i = 0; i < count; i++) {
-               pfn = page_to_pfn(pages[i]);
-               mfn = get_phys_to_machine(pfn);
-               if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               set_page_private(pages[i], INVALID_P2M_ENTRY);
-               WARN_ON(!PagePrivate(pages[i]));
-               ClearPagePrivate(pages[i]);
-               set_phys_to_machine(pfn, pages[i]->index);
-               if (m2p_override)
-                       ret = m2p_remove_override(pages[i],
-                                                 kmap_ops ?
-                                                  &kmap_ops[i] : NULL,
-                                                 mfn);
+               ret = m2p_remove_override(pages[i], kmap_ops ?
+                                      &kmap_ops[i] : NULL);
                if (ret)
                        goto out;
        }
@@ -1076,22 +1027,8 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 
        return ret;
 }
-
-int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
-                   struct page **pages, unsigned int count)
-{
-       return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
-}
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
-int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
-                               struct gnttab_map_grant_ref *kmap_ops,
-                               struct page **pages, unsigned int count)
-{
-       return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
-}
-EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
-
 static unsigned nr_status_frames(unsigned nr_grant_frames)
 {
        BUG_ON(grefs_per_grant_frame == 0);
index 49a62b4..0e8388e 100644 (file)
 #include <linux/slab.h>
 #include <linux/buffer_head.h>
 #include <linux/mutex.h>
-#include <linux/crc32c.h>
 #include <linux/genhd.h>
 #include <linux/blkdev.h>
 #include "ctree.h"
 #include "disk-io.h"
+#include "hash.h"
 #include "transaction.h"
 #include "extent_io.h"
 #include "volumes.h"
@@ -1823,7 +1823,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
                size_t sublen = i ? PAGE_CACHE_SIZE :
                                    (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
 
-               crc = crc32c(crc, data, sublen);
+               crc = btrfs_crc32c(crc, data, sublen);
        }
        btrfs_csum_final(crc, csum);
        if (memcmp(csum, h->csum, state->csum_size))
index 0e69295..5215f04 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
-#include <linux/crc32c.h>
 #include <linux/slab.h>
 #include <linux/migrate.h>
 #include <linux/ratelimit.h>
@@ -35,6 +34,7 @@
 #include <asm/unaligned.h>
 #include "ctree.h"
 #include "disk-io.h"
+#include "hash.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
 #include "volumes.h"
@@ -244,7 +244,7 @@ out:
 
 u32 btrfs_csum_data(char *data, u32 seed, size_t len)
 {
-       return crc32c(seed, data, len);
+       return btrfs_crc32c(seed, data, len);
 }
 
 void btrfs_csum_final(u32 crc, char *result)
index 5c4ab9c..184e9cb 100644 (file)
@@ -2629,7 +2629,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                        EXTENT_DEFRAG, 1, cached_state);
        if (ret) {
                u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
-               if (last_snapshot >= BTRFS_I(inode)->generation)
+               if (0 && last_snapshot >= BTRFS_I(inode)->generation)
                        /* the inode is shared */
                        new = record_old_file_extents(inode, ordered_extent);
 
index 730dce3..cf9107a 100644 (file)
 #include <linux/xattr.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/radix-tree.h>
-#include <linux/crc32c.h>
 #include <linux/vmalloc.h>
 #include <linux/string.h>
 
 #include "send.h"
 #include "backref.h"
+#include "hash.h"
 #include "locking.h"
 #include "disk-io.h"
 #include "btrfs_inode.h"
@@ -620,7 +620,7 @@ static int send_cmd(struct send_ctx *sctx)
        hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
        hdr->crc = 0;
 
-       crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+       crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
        hdr->crc = cpu_to_le32(crc);
 
        ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
index c02f633..97cc241 100644 (file)
@@ -1996,7 +1996,7 @@ static void __exit exit_btrfs_fs(void)
        btrfs_hash_exit();
 }
 
-module_init(init_btrfs_fs)
+late_initcall(init_btrfs_fs);
 module_exit(exit_btrfs_fs)
 
 MODULE_LICENSE("GPL");
index 651dba1..27265a8 100644 (file)
@@ -654,14 +654,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
 static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
 {
-       spin_lock_irq(&mapping->tree_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mapping->tree_lock, flags);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
-       spin_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irqrestore(&mapping->tree_lock, flags);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 }
 
index e1529b4..3d78fcc 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -748,11 +748,10 @@ EXPORT_SYMBOL(setup_arg_pages);
 
 #endif /* CONFIG_MMU */
 
-struct file *open_exec(const char *name)
+static struct file *do_open_exec(struct filename *name)
 {
        struct file *file;
        int err;
-       struct filename tmp = { .name = name };
        static const struct open_flags open_exec_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_EXEC | MAY_OPEN,
@@ -760,7 +759,7 @@ struct file *open_exec(const char *name)
                .lookup_flags = LOOKUP_FOLLOW,
        };
 
-       file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags);
+       file = do_filp_open(AT_FDCWD, name, &open_exec_flags);
        if (IS_ERR(file))
                goto out;
 
@@ -784,6 +783,12 @@ exit:
        fput(file);
        return ERR_PTR(err);
 }
+
+struct file *open_exec(const char *name)
+{
+       struct filename tmp = { .name = name };
+       return do_open_exec(&tmp);
+}
 EXPORT_SYMBOL(open_exec);
 
 int kernel_read(struct file *file, loff_t offset,
@@ -1162,7 +1167,7 @@ int prepare_bprm_creds(struct linux_binprm *bprm)
        return -ENOMEM;
 }
 
-void free_bprm(struct linux_binprm *bprm)
+static void free_bprm(struct linux_binprm *bprm)
 {
        free_arg_pages(bprm);
        if (bprm->cred) {
@@ -1432,7 +1437,7 @@ static int exec_binprm(struct linux_binprm *bprm)
 /*
  * sys_execve() executes a new program.
  */
-static int do_execve_common(const char *filename,
+static int do_execve_common(struct filename *filename,
                                struct user_arg_ptr argv,
                                struct user_arg_ptr envp)
 {
@@ -1441,6 +1446,9 @@ static int do_execve_common(const char *filename,
        struct files_struct *displaced;
        int retval;
 
+       if (IS_ERR(filename))
+               return PTR_ERR(filename);
+
        /*
         * We move the actual failure in case of RLIMIT_NPROC excess from
         * set*uid() to execve() because too many poorly written programs
@@ -1473,7 +1481,7 @@ static int do_execve_common(const char *filename,
        check_unsafe_exec(bprm);
        current->in_execve = 1;
 
-       file = open_exec(filename);
+       file = do_open_exec(filename);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_unmark;
@@ -1481,8 +1489,7 @@ static int do_execve_common(const char *filename,
        sched_exec();
 
        bprm->file = file;
-       bprm->filename = filename;
-       bprm->interp = filename;
+       bprm->filename = bprm->interp = filename->name;
 
        retval = bprm_mm_init(bprm);
        if (retval)
@@ -1523,6 +1530,7 @@ static int do_execve_common(const char *filename,
        acct_update_integrals(current);
        task_numa_free(current);
        free_bprm(bprm);
+       putname(filename);
        if (displaced)
                put_files_struct(displaced);
        return retval;
@@ -1544,10 +1552,11 @@ out_files:
        if (displaced)
                reset_files_struct(displaced);
 out_ret:
+       putname(filename);
        return retval;
 }
 
-int do_execve(const char *filename,
+int do_execve(struct filename *filename,
        const char __user *const __user *__argv,
        const char __user *const __user *__envp)
 {
@@ -1557,7 +1566,7 @@ int do_execve(const char *filename,
 }
 
 #ifdef CONFIG_COMPAT
-static int compat_do_execve(const char *filename,
+static int compat_do_execve(struct filename *filename,
        const compat_uptr_t __user *__argv,
        const compat_uptr_t __user *__envp)
 {
@@ -1607,25 +1616,13 @@ SYSCALL_DEFINE3(execve,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp)
 {
-       struct filename *path = getname(filename);
-       int error = PTR_ERR(path);
-       if (!IS_ERR(path)) {
-               error = do_execve(path->name, argv, envp);
-               putname(path);
-       }
-       return error;
+       return do_execve(getname(filename), argv, envp);
 }
 #ifdef CONFIG_COMPAT
 asmlinkage long compat_sys_execve(const char __user * filename,
        const compat_uptr_t __user * argv,
        const compat_uptr_t __user * envp)
 {
-       struct filename *path = getname(filename);
-       int error = PTR_ERR(path);
-       if (!IS_ERR(path)) {
-               error = compat_do_execve(path->name, argv, envp);
-               putname(path);
-       }
-       return error;
+       return compat_do_execve(getname(filename), argv, envp);
 }
 #endif
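
The net effect of the exec.c changes is an ownership transfer: getname() now runs in the syscall wrappers' argument lists, and do_execve_common() both handles an IS_ERR() filename itself and calls putname() on the success path and on every failure path, so bprm->filename and bprm->interp point at storage that stays alive for the whole exec. A minimal userspace sketch of that consume-on-all-paths pattern, with hypothetical stand-ins for getname()/putname():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *get_name(const char *s) { return strdup(s); }
    static void  put_name(char *n)       { free(n); }

    static int do_exec_common(char *name)    /* consumes name */
    {
        if (!name)                            /* IS_ERR() analogue */
            return -1;
        if (name[0] != '/') {                 /* some mid-way failure */
            put_name(name);                   /* released on error... */
            return -1;
        }
        printf("exec %s\n", name);            /* valid for the whole call */
        put_name(name);                       /* ...and on success */
        return 0;
    }

    int main(void)
    {
        return do_exec_common(get_name("/bin/true"));
    }
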
index d580df2..385f781 100644 (file)
@@ -196,6 +196,7 @@ recopy:
                goto error;
 
        result->uptr = filename;
+       result->aname = NULL;
        audit_getname(result);
        return result;
 
@@ -210,6 +211,35 @@ getname(const char __user * filename)
        return getname_flags(filename, 0, NULL);
 }
 
+/*
+ * The "getname_kernel()" interface doesn't do pathnames longer
+ * than EMBEDDED_NAME_MAX. Deal with it - you're a kernel user.
+ */
+struct filename *
+getname_kernel(const char * filename)
+{
+       struct filename *result;
+       char *kname;
+       int len;
+
+       len = strlen(filename);
+       if (len >= EMBEDDED_NAME_MAX)
+               return ERR_PTR(-ENAMETOOLONG);
+
+       result = __getname();
+       if (unlikely(!result))
+               return ERR_PTR(-ENOMEM);
+
+       kname = (char *)result + sizeof(*result);
+       result->name = kname;
+       result->uptr = NULL;
+       result->aname = NULL;
+       result->separate = false;
+
+       strlcpy(kname, filename, EMBEDDED_NAME_MAX);
+       return result;
+}
+
 #ifdef CONFIG_AUDITSYSCALL
 void putname(struct filename *name)
 {
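
getname_kernel() packs the header and the string into the one fixed-size buffer __getname() hands back, pointing result->name just past the struct, which is why lengths of EMBEDDED_NAME_MAX or more are rejected before anything is allocated. A userspace sketch of that single-allocation layout, with hypothetical sizes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUF_SZ 4096   /* stand-in for __getname()'s buffer size */

    struct kfilename {
        const char *name;   /* points into the same allocation */
    };

    #define EMBEDDED_MAX (BUF_SZ - sizeof(struct kfilename))

    static struct kfilename *name_kernel(const char *s)
    {
        struct kfilename *f;
        char *kname;

        if (strlen(s) >= EMBEDDED_MAX)
            return NULL;                 /* -ENAMETOOLONG in the kernel */
        f = malloc(BUF_SZ);              /* one allocation for both */
        if (!f)
            return NULL;
        kname = (char *)f + sizeof(*f);
        f->name = kname;
        strcpy(kname, s);                /* length checked above */
        return f;
    }

    int main(void)
    {
        struct kfilename *f = name_kernel("/sbin/init");
        if (f) { printf("%s\n", f->name); free(f); }
        return 0;
    }
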
index 9a5ca03..871d6ed 100644 (file)
@@ -80,7 +80,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type)
        }
 
        if (res.acl_access != NULL) {
-               if (posix_acl_equiv_mode(res.acl_access, NULL) ||
+               if ((posix_acl_equiv_mode(res.acl_access, NULL) == 0) ||
                    res.acl_access->a_count == 0) {
                        posix_acl_release(res.acl_access);
                        res.acl_access = NULL;
@@ -113,7 +113,7 @@ getout:
        return ERR_PTR(status);
 }
 
-int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
                struct posix_acl *dfacl)
 {
        struct nfs_server *server = NFS_SERVER(inode);
@@ -198,6 +198,15 @@ out:
        return status;
 }
 
+int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+               struct posix_acl *dfacl)
+{
+       int ret;
+       ret = __nfs3_proc_setacls(inode, acl, dfacl);
+       return (ret == -EOPNOTSUPP) ? 0 : ret;
+
+}
+
 int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
        struct posix_acl *alloc = NULL, *dfacl = NULL;
@@ -225,7 +234,7 @@ int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
                if (IS_ERR(alloc))
                        goto fail;
        }
-       status = nfs3_proc_setacls(inode, acl, dfacl);
+       status = __nfs3_proc_setacls(inode, acl, dfacl);
        posix_acl_release(alloc);
        return status;
 
@@ -233,25 +242,6 @@ fail:
        return PTR_ERR(alloc);
 }
 
-int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
-               umode_t mode)
-{
-       struct posix_acl *default_acl, *acl;
-       int error;
-
-       error = posix_acl_create(dir, &mode, &default_acl, &acl);
-       if (error)
-               return (error == -EOPNOTSUPP) ? 0 : error;
-
-       error = nfs3_proc_setacls(inode, acl, default_acl);
-
-       if (acl)
-               posix_acl_release(acl);
-       if (default_acl)
-               posix_acl_release(default_acl);
-       return error;
-}
-
 const struct xattr_handler *nfs3_xattr_handlers[] = {
        &posix_acl_access_xattr_handler,
        &posix_acl_default_xattr_handler,
index dbb3e1f..860ad26 100644 (file)
@@ -170,7 +170,7 @@ void nfs41_shutdown_client(struct nfs_client *clp)
 void nfs40_shutdown_client(struct nfs_client *clp)
 {
        if (clp->cl_slot_tbl) {
-               nfs4_release_slot_table(clp->cl_slot_tbl);
+               nfs4_shutdown_slot_table(clp->cl_slot_tbl);
                kfree(clp->cl_slot_tbl);
        }
 }
index 42da6af..2da6a69 100644 (file)
@@ -1620,15 +1620,15 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
 {
        struct nfs4_opendata *data = calldata;
 
-       nfs40_setup_sequence(data->o_arg.server, &data->o_arg.seq_args,
-                               &data->o_res.seq_res, task);
+       nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
+                               &data->c_res.seq_res, task);
 }
 
 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
 {
        struct nfs4_opendata *data = calldata;
 
-       nfs40_sequence_done(task, &data->o_res.seq_res);
+       nfs40_sequence_done(task, &data->c_res.seq_res);
 
        data->rpc_status = task->tk_status;
        if (data->rpc_status == 0) {
@@ -1686,7 +1686,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
        };
        int status;
 
-       nfs4_init_sequence(&data->o_arg.seq_args, &data->o_res.seq_res, 1);
+       nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
        kref_get(&data->kref);
        data->rpc_done = 0;
        data->rpc_status = 0;
index cf883c7..e799dc3 100644 (file)
@@ -231,14 +231,23 @@ out:
        return ret;
 }
 
+/*
+ * nfs4_release_slot_table - release all slot table entries
+ */
+static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
+{
+       nfs4_shrink_slot_table(tbl, 0);
+}
+
 /**
- * nfs4_release_slot_table - release resources attached to a slot table
+ * nfs4_shutdown_slot_table - release resources attached to a slot table
  * @tbl: slot table to shut down
  *
  */
-void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
+void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
 {
-       nfs4_shrink_slot_table(tbl, 0);
+       nfs4_release_slot_table(tbl);
+       rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
 }
 
 /**
@@ -422,7 +431,7 @@ void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
        spin_unlock(&tbl->slot_tbl_lock);
 }
 
-static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
+static void nfs4_release_session_slot_tables(struct nfs4_session *session)
 {
        nfs4_release_slot_table(&session->fc_slot_table);
        nfs4_release_slot_table(&session->bc_slot_table);
@@ -450,7 +459,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
        if (status && tbl->slots == NULL)
                /* Fore and back channel share a connection so get
                 * both slot tables or neither */
-               nfs4_destroy_session_slot_tables(ses);
+               nfs4_release_session_slot_tables(ses);
        return status;
 }
 
@@ -470,6 +479,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
        return session;
 }
 
+static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
+{
+       nfs4_shutdown_slot_table(&session->fc_slot_table);
+       nfs4_shutdown_slot_table(&session->bc_slot_table);
+}
+
 void nfs4_destroy_session(struct nfs4_session *session)
 {
        struct rpc_xprt *xprt;
index 2323061..b34ada9 100644 (file)
@@ -74,7 +74,7 @@ enum nfs4_session_state {
 
 extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
                unsigned int max_reqs, const char *queue);
-extern void nfs4_release_slot_table(struct nfs4_slot_table *tbl);
+extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
index 8750ae1..aada580 100644 (file)
@@ -4742,6 +4742,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
                                enum ocfs2_alloc_restarted *reason_ret)
 {
        int status = 0, err = 0;
+       int need_free = 0;
        int free_extents;
        enum ocfs2_alloc_restarted reason = RESTART_NONE;
        u32 bit_off, num_bits;
@@ -4796,7 +4797,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
                                              OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
-               goto leave;
+               need_free = 1;
+               goto bail;
        }
 
        block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
@@ -4807,7 +4809,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
                                     num_bits, flags, meta_ac);
        if (status < 0) {
                mlog_errno(status);
-               goto leave;
+               need_free = 1;
+               goto bail;
        }
 
        ocfs2_journal_dirty(handle, et->et_root_bh);
@@ -4821,6 +4824,19 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
                reason = RESTART_TRANS;
        }
 
+bail:
+       if (need_free) {
+               if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+                       ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+                                       bit_off, num_bits);
+               else
+                       ocfs2_free_clusters(handle,
+                                       data_ac->ac_inode,
+                                       data_ac->ac_bh,
+                                       ocfs2_clusters_to_blocks(osb->sb, bit_off),
+                                       num_bits);
+       }
+
 leave:
        if (reason_ret)
                *reason_ret = reason;
@@ -6805,6 +6821,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                                         struct buffer_head *di_bh)
 {
        int ret, i, has_data, num_pages = 0;
+       int need_free = 0;
+       u32 bit_off, num;
        handle_t *handle;
        u64 uninitialized_var(block);
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
@@ -6850,7 +6868,6 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
        }
 
        if (has_data) {
-               u32 bit_off, num;
                unsigned int page_end;
                u64 phys;
 
@@ -6886,6 +6903,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
                if (ret) {
                        mlog_errno(ret);
+                       need_free = 1;
                        goto out_commit;
                }
 
@@ -6896,6 +6914,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
                if (ret) {
                        mlog_errno(ret);
+                       need_free = 1;
                        goto out_commit;
                }
 
@@ -6927,6 +6946,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
                if (ret) {
                        mlog_errno(ret);
+                       need_free = 1;
                        goto out_commit;
                }
 
@@ -6938,6 +6958,18 @@ out_commit:
                dquot_free_space_nodirty(inode,
                                          ocfs2_clusters_to_bytes(osb->sb, 1));
 
+       if (need_free) {
+               if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+                       ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+                                       bit_off, num);
+               else
+                       ocfs2_free_clusters(handle,
+                                       data_ac->ac_inode,
+                                       data_ac->ac_bh,
+                                       ocfs2_clusters_to_blocks(osb->sb, bit_off),
+                                       num);
+       }
+
        ocfs2_commit_trans(osb, handle);
 
 out_unlock:
index cd5496b..0440134 100644 (file)
@@ -781,6 +781,48 @@ bail:
        return status;
 }
 
+int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
+                               handle_t *handle,
+                               struct ocfs2_alloc_context *ac,
+                               u32 bit_off,
+                               u32 num_bits)
+{
+       int status, start;
+       u32 clear_bits;
+       struct inode *local_alloc_inode;
+       void *bitmap;
+       struct ocfs2_dinode *alloc;
+       struct ocfs2_local_alloc *la;
+
+       BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
+
+       local_alloc_inode = ac->ac_inode;
+       alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
+       la = OCFS2_LOCAL_ALLOC(alloc);
+
+       bitmap = la->la_bitmap;
+       start = bit_off - le32_to_cpu(la->la_bm_off);
+       clear_bits = num_bits;
+
+       status = ocfs2_journal_access_di(handle,
+                       INODE_CACHE(local_alloc_inode),
+                       osb->local_alloc_bh,
+                       OCFS2_JOURNAL_ACCESS_WRITE);
+       if (status < 0) {
+               mlog_errno(status);
+               goto bail;
+       }
+
+       while (clear_bits--)
+               ocfs2_clear_bit(start++, bitmap);
+
+       le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
+       ocfs2_journal_dirty(handle, osb->local_alloc_bh);
+
+bail:
+       return status;
+}
+
 static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
 {
        u32 count;
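
The new helper clears a run of num_bits bits starting at bit_off - la_bm_off and decrements the in-use count under a journaled write; the bitmap walk itself is the ordinary clear-a-run pattern. A standalone illustration:

    #include <stdio.h>

    /* Clear count bits starting at bit index start, LSB-first per byte. */
    static void clear_run(unsigned char *bitmap, unsigned start, unsigned count)
    {
        while (count--) {
            bitmap[start / 8] &= ~(1u << (start % 8));
            start++;
        }
    }

    int main(void)
    {
        unsigned char bm[2] = { 0xff, 0xff };

        clear_run(bm, 4, 6);                  /* clears bits 4..9 */
        printf("%02x %02x\n", bm[0], bm[1]);  /* 0f fc */
        return 0;
    }
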
index 1be9b58..44a7d1f 100644 (file)
@@ -55,6 +55,12 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
                                 u32 *bit_off,
                                 u32 *num_bits);
 
+int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
+                               handle_t *handle,
+                               struct ocfs2_alloc_context *ac,
+                               u32 bit_off,
+                               u32 num_bits);
+
 void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
                                      unsigned int num_clusters);
 void ocfs2_la_enable_worker(struct work_struct *work);
index 38bae5a..11c54fd 100644 (file)
@@ -521,8 +521,11 @@ posix_acl_chmod(struct inode *inode, umode_t mode)
                return -EOPNOTSUPP;
 
        acl = get_acl(inode, ACL_TYPE_ACCESS);
-       if (IS_ERR_OR_NULL(acl))
+       if (IS_ERR_OR_NULL(acl)) {
+               if (acl == ERR_PTR(-EOPNOTSUPP))
+                       return 0;
                return PTR_ERR(acl);
+       }
 
        ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode);
        if (ret)
@@ -544,14 +547,15 @@ posix_acl_create(struct inode *dir, umode_t *mode,
                goto no_acl;
 
        p = get_acl(dir, ACL_TYPE_DEFAULT);
-       if (IS_ERR(p))
+       if (IS_ERR(p)) {
+               if (p == ERR_PTR(-EOPNOTSUPP))
+                       goto apply_umask;
                return PTR_ERR(p);
-
-       if (!p) {
-               *mode &= ~current_umask();
-               goto no_acl;
        }
 
+       if (!p)
+               goto apply_umask;
+
        *acl = posix_acl_clone(p, GFP_NOFS);
        if (!*acl)
                return -ENOMEM;
@@ -575,6 +579,8 @@ posix_acl_create(struct inode *dir, umode_t *mode,
        }
        return 0;
 
+apply_umask:
+       *mode &= ~current_umask();
 no_acl:
        *default_acl = NULL;
        *acl = NULL;
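
Both the missing-default-ACL case and an -EOPNOTSUPP from get_acl() now land on the shared apply_umask label, preserving the rule that the umask masks the create mode only when no default ACL governs the new inode. The masking step itself, worked through:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        mode_t mode = 0666, umask_bits = 022;

        /* With no (or unsupported) default ACL, creation falls back to
         * POSIX umask handling: 0666 & ~022 = 0644. */
        mode &= ~umask_bits;
        printf("%04o\n", (unsigned)mode);
        return 0;
    }
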
index fd8bf32..b4a745d 100644 (file)
@@ -115,7 +115,6 @@ extern int copy_strings_kernel(int argc, const char *const *argv,
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
 extern void install_exec_creds(struct linux_binprm *bprm);
 extern void set_binfmt(struct linux_binfmt *new);
-extern void free_bprm(struct linux_binprm *);
 extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
 
 #endif /* _LINUX_BINFMTS_H */
index 09f553c..d79678c 100644 (file)
@@ -2079,6 +2079,7 @@ extern struct file * dentry_open(const struct path *, int, const struct cred *);
 extern int filp_close(struct file *, fl_owner_t id);
 
 extern struct filename *getname(const char __user *);
+extern struct filename *getname_kernel(const char *);
 
 enum {
        FILE_CREATED = 1,
index 3ccfcec..b2fb167 100644 (file)
@@ -379,12 +379,14 @@ struct nfs_openres {
  * Arguments to the open_confirm call.
  */
 struct nfs_open_confirmargs {
+       struct nfs4_sequence_args       seq_args;
        const struct nfs_fh *   fh;
        nfs4_stateid *          stateid;
        struct nfs_seqid *      seqid;
 };
 
 struct nfs_open_confirmres {
+       struct nfs4_sequence_res        seq_res;
        nfs4_stateid            stateid;
        struct nfs_seqid *      seqid;
 };
index 26ebcf4..69ae03f 100644 (file)
@@ -80,13 +80,14 @@ struct nvme_dev {
        struct dma_pool *prp_small_pool;
        int instance;
        int queue_count;
-       int db_stride;
+       u32 db_stride;
        u32 ctrl_config;
        struct msix_entry *entry;
        struct nvme_bar __iomem *bar;
        struct list_head namespaces;
        struct kref kref;
        struct miscdevice miscdev;
+       struct work_struct reset_work;
        char name[12];
        char serial[20];
        char model[40];
@@ -94,6 +95,8 @@ struct nvme_dev {
        u32 max_hw_sectors;
        u32 stripe_size;
        u16 oncs;
+       u16 abort_limit;
+       u8 initialized;
 };
 
 /*
@@ -165,6 +168,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
 struct sg_io_hdr;
 
 int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
 int nvme_sg_get_version_num(int __user *ip);
 
 #endif /* _LINUX_NVME_H */
index e464b4e..d1fe1a7 100644 (file)
@@ -228,9 +228,9 @@ PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
 TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
 PAGEFLAG(MappedToDisk, mappedtodisk)
 
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+/* PG_readahead is only used for reads; PG_reclaim is only for writes */
 PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
-PAGEFLAG(Readahead, reclaim)           /* Reminder to do async read-ahead */
+PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
 
 #ifdef CONFIG_HIGHMEM
 /*
index 68a0e84..a781dec 100644 (file)
@@ -128,6 +128,7 @@ struct bio_list;
 struct fs_struct;
 struct perf_event_context;
 struct blk_plug;
+struct filename;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -2311,7 +2312,7 @@ extern void do_group_exit(int);
 extern int allow_signal(int);
 extern int disallow_signal(int);
 
-extern int do_execve(const char *,
+extern int do_execve(struct filename *,
                     const char __user * const __user *,
                     const char __user * const __user *);
 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
index 989c04e..e5ab622 100644 (file)
@@ -350,6 +350,16 @@ struct nvme_delete_queue {
        __u32                   rsvd11[5];
 };
 
+struct nvme_abort_cmd {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[9];
+       __le16                  sqid;
+       __u16                   cid;
+       __u32                   rsvd11[5];
+};
+
 struct nvme_download_firmware {
        __u8                    opcode;
        __u8                    flags;
@@ -384,6 +394,7 @@ struct nvme_command {
                struct nvme_download_firmware dlfw;
                struct nvme_format_cmd format;
                struct nvme_dsm_cmd dsm;
+               struct nvme_abort_cmd abort;
        };
 };
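
Like the other members of the nvme_command union, nvme_abort_cmd is padded with reserved fields so that every command is one 64-byte submission-queue entry; only the target's submission-queue id and command id carry the abort's payload. A compile-time size check, with userspace fixed-width types standing in for the kernel's (C11 for static_assert):

    #include <assert.h>
    #include <stdint.h>

    struct nvme_abort_cmd {
        uint8_t  opcode;
        uint8_t  flags;
        uint16_t command_id;
        uint32_t rsvd1[9];
        uint16_t sqid;      /* __le16 in the kernel */
        uint16_t cid;
        uint32_t rsvd11[5];
    };

    /* 1 + 1 + 2 + 36 + 2 + 2 + 20 = 64: one NVMe SQE */
    static_assert(sizeof(struct nvme_abort_cmd) == 64, "SQE is 64 bytes");

    int main(void) { return 0; }
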
 
index 7ad033d..a5af2a2 100644 (file)
@@ -191,15 +191,11 @@ void gnttab_free_auto_xlat_frames(void);
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+                   struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count);
-int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
-                             struct gnttab_map_grant_ref *kmap_ops,
-                             struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+                     struct gnttab_map_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count);
-int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
-                               struct gnttab_map_grant_ref *kunmap_ops,
-                               struct page **pages, unsigned int count);
 
 /* Perform a batch of grant map/copy operations. Retry every batch slot
  * for which the hypervisor returns GNTST_eagain. This is typically due
index 2fd9cef..eb03090 100644 (file)
@@ -812,7 +812,7 @@ void __init load_default_modules(void)
 static int run_init_process(const char *init_filename)
 {
        argv_init[0] = init_filename;
-       return do_execve(init_filename,
+       return do_execve(getname_kernel(init_filename),
                (const char __user *const __user *)argv_init,
                (const char __user *const __user *)envp_init);
 }
index 10176cd..7aef2f4 100644 (file)
@@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name)
        struct audit_context *context = current->audit_context;
 
        BUG_ON(!context);
-       if (!context->in_syscall) {
+       if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
                printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
                       __FILE__, __LINE__, context->serial, name);
index b086006..6b375af 100644 (file)
@@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data)
 
        commit_creds(new);
 
-       retval = do_execve(sub_info->path,
+       retval = do_execve(getname_kernel(sub_info->path),
                           (const char __user *const __user *)sub_info->argv,
                           (const char __user *const __user *)sub_info->envp);
        if (!retval)
index dbf94a7..a48abea 100644
@@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options"
 
 config DEBUG_INFO
        bool "Compile the kernel with debug info"
-       depends on DEBUG_KERNEL
+       depends on DEBUG_KERNEL && !COMPILE_TEST
        help
           If you say Y here the resulting kernel image will include
          debugging info resulting in a larger kernel image.
index 2d30e2c..7106cb1 100644
@@ -2173,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page)
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;
+               unsigned long flags;
 
                if (!mapping)
                        return 1;
 
-               spin_lock_irq(&mapping->tree_lock);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
@@ -2186,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page)
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
-               spin_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
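
The switch to the irqsave variants is needed because spin_unlock_irq() unconditionally re-enables interrupts, which is unsafe if a caller reaches __set_page_dirty_nobuffers() with interrupts already disabled; irqsave/irqrestore preserves the caller's interrupt state instead. The general pattern:

	unsigned long flags;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	/* ... tag the page dirty in the radix tree ... */
	spin_unlock_irqrestore(&mapping->tree_lock, flags);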
index 98e85e9..e76ace3 100644
@@ -63,6 +63,8 @@ unsigned long total_swapcache_pages(void)
        return ret;
 }
 
+static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
+
 void show_swap_cache_info(void)
 {
        printk("%lu pages in swap cache\n", total_swapcache_pages());
@@ -286,8 +288,11 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 
        page = find_get_page(swap_address_space(entry), entry.val);
 
-       if (page)
+       if (page) {
                INC_CACHE_INFO(find_success);
+               if (TestClearPageReadahead(page))
+                       atomic_inc(&swapin_readahead_hits);
+       }
 
        INC_CACHE_INFO(find_total);
        return page;
@@ -389,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        return found_page;
 }
 
+static unsigned long swapin_nr_pages(unsigned long offset)
+{
+       static unsigned long prev_offset;
+       unsigned int pages, max_pages, last_ra;
+       static atomic_t last_readahead_pages;
+
+       max_pages = 1 << ACCESS_ONCE(page_cluster);
+       if (max_pages <= 1)
+               return 1;
+
+       /*
+        * This heuristic has been found to work well on both sequential and
+        * random loads, swapping to hard disk or to SSD: please don't ask
+        * what the "+ 2" means, it just happens to work well, that's all.
+        */
+       pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
+       if (pages == 2) {
+               /*
+                * We can have no readahead hits to judge by: but must not get
+                * stuck here forever, so check for an adjacent offset instead
+                * (and don't even bother to check whether swap type is same).
+                */
+               if (offset != prev_offset + 1 && offset != prev_offset - 1)
+                       pages = 1;
+               prev_offset = offset;
+       } else {
+               unsigned int roundup = 4;
+               while (roundup < pages)
+                       roundup <<= 1;
+               pages = roundup;
+       }
+
+       if (pages > max_pages)
+               pages = max_pages;
+
+       /* Don't shrink readahead too fast */
+       last_ra = atomic_read(&last_readahead_pages) / 2;
+       if (pages < last_ra)
+               pages = last_ra;
+       atomic_set(&last_readahead_pages, pages);
+
+       return pages;
+}
+
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
@@ -412,11 +461,16 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
 {
        struct page *page;
-       unsigned long offset = swp_offset(entry);
+       unsigned long entry_offset = swp_offset(entry);
+       unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
-       unsigned long mask = (1UL << page_cluster) - 1;
+       unsigned long mask;
        struct blk_plug plug;
 
+       mask = swapin_nr_pages(offset) - 1;
+       if (!mask)
+               goto skip;
+
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
@@ -430,10 +484,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
+               if (offset != entry_offset)
+                       SetPageReadahead(page);
                page_cache_release(page);
        }
        blk_finish_plug(&plug);
 
        lru_add_drain();        /* Push any new pages onto the LRU now */
+skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
 }
index c6c13b0..4a7f7e6 100644
@@ -1923,7 +1923,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        p->swap_map = NULL;
        cluster_info = p->cluster_info;
        p->cluster_info = NULL;
-       p->flags = 0;
        frontswap_map = frontswap_map_get(p);
        spin_unlock(&p->lock);
        spin_unlock(&swap_lock);
@@ -1949,6 +1948,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
                mutex_unlock(&inode->i_mutex);
        }
        filp_close(swap_file, NULL);
+
+       /*
+        * Clear the SWP_USED flag after all resources are freed so that swapon
+        * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
+        * not hold p->lock after we cleared its SWP_WRITEOK.
+        */
+       spin_lock(&swap_lock);
+       p->flags = 0;
+       spin_unlock(&swap_lock);
+
        err = 0;
        atomic_inc(&proc_poll_event);
        wake_up_interruptible(&proc_poll_wait);
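
Clearing p->flags before the teardown finished left a window in which a concurrent swapon could claim this swap_info slot while its resources were still being freed, since alloc_swap_info() treats a clear SWP_USED bit as a free slot, roughly:

	/* alloc_swap_info(), abridged: a slot without SWP_USED is reusable */
	for (type = 0; type < nr_swapfiles; type++)
		if (!(swap_info[type]->flags & SWP_USED))
			break;

Moving the assignment after filp_close(), under swap_lock, closes that window.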
index 7a426ed..df3652a 100644
@@ -244,6 +244,19 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
        }
 }
 
+/* Toshiba Satellite L40 implements EAPD in a standard way unlike others */
+static void ad1986a_fixup_eapd(struct hda_codec *codec,
+                              const struct hda_fixup *fix, int action)
+{
+       struct ad198x_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               codec->inv_eapd = 0;
+               spec->gen.keep_eapd_on = 1;
+               spec->eapd_nid = 0x1b;
+       }
+}
+
 enum {
        AD1986A_FIXUP_INV_JACK_DETECT,
        AD1986A_FIXUP_ULTRA,
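
For context (inferred from the surrounding driver, not stated in the patch itself): the AD1986A family normally treats the EAPD bit with inverted polarity, so inv_eapd = 0 switches this model back to the standard sense, keep_eapd_on stops the generic parser from powering the external amp down when idle, and 0x1b is presumably the pin whose EAPD control drives the speaker amp.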
@@ -251,6 +264,7 @@ enum {
        AD1986A_FIXUP_3STACK,
        AD1986A_FIXUP_LAPTOP,
        AD1986A_FIXUP_LAPTOP_IMIC,
+       AD1986A_FIXUP_EAPD,
 };
 
 static const struct hda_fixup ad1986a_fixups[] = {
@@ -311,6 +325,10 @@ static const struct hda_fixup ad1986a_fixups[] = {
                .chained_before = 1,
                .chain_id = AD1986A_FIXUP_LAPTOP,
        },
+       [AD1986A_FIXUP_EAPD] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = ad1986a_fixup_eapd,
+       },
 };
 
 static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -318,6 +336,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
        SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
        SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
        SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+       SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD),
        SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
        SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
        SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
@@ -472,6 +491,8 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
 static int patch_ad1983(struct hda_codec *codec)
 {
        struct ad198x_spec *spec;
+       static hda_nid_t conn_0c[] = { 0x08 };
+       static hda_nid_t conn_0d[] = { 0x09 };
        int err;
 
        err = alloc_ad_spec(codec);
@@ -479,8 +500,14 @@ static int patch_ad1983(struct hda_codec *codec)
                return err;
        spec = codec->spec;
 
+       spec->gen.mixer_nid = 0x0e;
        spec->gen.beep_nid = 0x10;
        set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+       /* limit the loopback routes not to confuse the parser */
+       snd_hda_override_conn_list(codec, 0x0c, ARRAY_SIZE(conn_0c), conn_0c);
+       snd_hda_override_conn_list(codec, 0x0d, ARRAY_SIZE(conn_0d), conn_0d);
+
        err = ad198x_parse_auto_config(codec, false);
        if (err < 0)
                goto error;
index 56a8f18..d9693ca 100644
@@ -1821,6 +1821,7 @@ enum {
        ALC889_FIXUP_IMAC91_VREF,
        ALC889_FIXUP_MBA11_VREF,
        ALC889_FIXUP_MBA21_VREF,
+       ALC889_FIXUP_MP11_VREF,
        ALC882_FIXUP_INV_DMIC,
        ALC882_FIXUP_NO_PRIMARY_HP,
        ALC887_FIXUP_ASUS_BASS,
@@ -2190,6 +2191,12 @@ static const struct hda_fixup alc882_fixups[] = {
                .chained = true,
                .chain_id = ALC889_FIXUP_MBP_VREF,
        },
+       [ALC889_FIXUP_MP11_VREF] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc889_fixup_mba11_vref,
+               .chained = true,
+               .chain_id = ALC885_FIXUP_MACPRO_GPIO,
+       },
        [ALC882_FIXUP_INV_DMIC] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_inv_dmic_0x12,
@@ -2253,7 +2260,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
        SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
        SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
-       SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
+       SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
        SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
        SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
        SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
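
The quirk-table change swaps the Mac Pro entry point without losing the old behaviour: with .chained set, applying ALC889_FIXUP_MP11_VREF first runs alc889_fixup_mba11_vref() and then follows .chain_id to ALC885_FIXUP_MACPRO_GPIO, so the GPIO setup is preserved and the vref pin fix is added on top.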
@@ -4427,6 +4434,9 @@ static void alc269_fill_coef(struct hda_codec *codec)
 
        if (spec->codec_variant != ALC269_TYPE_ALC269VB)
                return;
+       /* ALC271X doesn't seem to support these COEFs (bko#52181) */
+       if (!strcmp(codec->chip_name, "ALC271X"))
+               return;
 
        if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
                alc_write_coef_idx(codec, 0xf, 0x960b);
index de9408b..e05a86b 100644
@@ -14,6 +14,7 @@ config SND_USB_AUDIO
        select SND_HWDEP
        select SND_RAWMIDI
        select SND_PCM
+       select BITREVERSE
        help
          Say Y here to include support for USB audio and USB MIDI
          devices.