Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Mar 2009 21:48:34 +0000 (14:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Mar 2009 21:48:34 +0000 (14:48 -0700)
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-quota-2.6: (27 commits)
  ext2: Zero our b_size in ext2_quota_read()
  trivial: fix typos/grammar errors in fs/Kconfig
  quota: Coding style fixes
  quota: Remove superfluous inlines
  quota: Remove uppercase aliases for quota functions.
  nfsd: Use lowercase names of quota functions
  jfs: Use lowercase names of quota functions
  udf: Use lowercase names of quota functions
  ufs: Use lowercase names of quota functions
  reiserfs: Use lowercase names of quota functions
  ext4: Use lowercase names of quota functions
  ext3: Use lowercase names of quota functions
  ext2: Use lowercase names of quota functions
  ramfs: Remove quota call
  vfs: Use lowercase names of quota functions
  quota: Remove dqbuf_t and other cleanups
  quota: Remove NODQUOT macro
  quota: Make global quota locks cacheline aligned
  quota: Move quota files into separate directory
  ext4: quota reservation for delayed allocation
  ...

66 files changed:
fs/Kconfig
fs/Makefile
fs/attr.c
fs/dquot.c [deleted file]
fs/ext2/balloc.c
fs/ext2/ialloc.c
fs/ext2/inode.c
fs/ext2/super.c
fs/ext2/xattr.c
fs/ext3/balloc.c
fs/ext3/ialloc.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext3/super.c
fs/ext3/xattr.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/inode.c
fs/jfs/acl.c
fs/jfs/inode.c
fs/jfs/jfs_dtree.c
fs/jfs/jfs_extent.c
fs/jfs/jfs_inode.c
fs/jfs/jfs_xtree.c
fs/jfs/namei.c
fs/jfs/xattr.c
fs/namei.c
fs/nfsd/vfs.c
fs/open.c
fs/quota.c [deleted file]
fs/quota/Kconfig [new file with mode: 0644]
fs/quota/Makefile [new file with mode: 0644]
fs/quota/dquot.c [new file with mode: 0644]
fs/quota/quota.c [new file with mode: 0644]
fs/quota/quota_tree.c [new file with mode: 0644]
fs/quota/quota_tree.h [new file with mode: 0644]
fs/quota/quota_v1.c [new file with mode: 0644]
fs/quota/quota_v2.c [new file with mode: 0644]
fs/quota/quotaio_v1.h [new file with mode: 0644]
fs/quota/quotaio_v2.h [new file with mode: 0644]
fs/quota_tree.c [deleted file]
fs/quota_tree.h [deleted file]
fs/quota_v1.c [deleted file]
fs/quota_v2.c [deleted file]
fs/quotaio_v1.h [deleted file]
fs/quotaio_v2.h [deleted file]
fs/ramfs/file-nommu.c
fs/reiserfs/bitmap.c
fs/reiserfs/inode.c
fs/reiserfs/namei.c
fs/reiserfs/stree.c
fs/reiserfs/super.c
fs/super.c
fs/sync.c
fs/udf/balloc.c
fs/udf/ialloc.c
fs/ufs/balloc.c
fs/ufs/ialloc.c
include/linux/quota.h
include/linux/quotaops.h

index 93945dd..cef8b18 100644 (file)
@@ -56,61 +56,7 @@ endif # BLOCK
 
 source "fs/notify/Kconfig"
 
-config QUOTA
-       bool "Quota support"
-       help
-         If you say Y here, you will be able to set per user limits for disk
-         usage (also called disk quotas). Currently, it works for the
-         ext2, ext3, and reiserfs file systems. ext3 also supports journalled
-         quotas for which you don't need to run quotacheck(8) after an unclean
-         shutdown.
-         For further details, read the Quota mini-HOWTO, available from
-         <http://www.tldp.org/docs.html#howto>, or the documentation provided
-         with the quota tools. Quota support is probably only useful for
-         multi-user systems. If unsure, say N.
-
-config QUOTA_NETLINK_INTERFACE
-       bool "Report quota messages through netlink interface"
-       depends on QUOTA && NET
-       help
-         If you say Y here, quota warnings (about exceeding softlimit, reaching
-         hardlimit, etc.) will be reported through netlink interface. If unsure,
-         say Y.
-
-config PRINT_QUOTA_WARNING
-       bool "Print quota warnings to console (OBSOLETE)"
-       depends on QUOTA
-       default y
-       help
-         If you say Y here, quota warnings (about exceeding softlimit, reaching
-         hardlimit, etc.) will be printed to the process' controlling terminal.
-         Note that this behavior is currently deprecated and may go away in
-         future. Please use notification via netlink socket instead.
-
-# Generic support for tree-structured quota files. Selected when needed.
-config QUOTA_TREE
-        tristate
-
-config QFMT_V1
-       tristate "Old quota format support"
-       depends on QUOTA
-       help
-         This quota format was (is) used by kernels earlier than 2.4.22. If
-         you have quota working and you don't want to convert to the new quota
-         format, say Y here.
-
-config QFMT_V2
-       tristate "Quota format v2 support"
-       depends on QUOTA
-       select QUOTA_TREE
-       help
-         This quota format allows using quotas with 32-bit UIDs/GIDs. If you
-         need this functionality say Y here.
-
-config QUOTACTL
-       bool
-       depends on XFS_QUOTA || QUOTA
-       default y
+source "fs/quota/Kconfig"
 
 source "fs/autofs/Kconfig"
 source "fs/autofs4/Kconfig"
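For reference, the options deleted above are relocated rather than dropped. A sketch of the new fs/quota/Kconfig, reconstructed from the removed text (help text omitted here, and the final wording may differ slightly given the fs/Kconfig typo fixes elsewhere in this series):

# fs/quota/Kconfig -- sketch reconstructed from the options removed above
config QUOTA
	bool "Quota support"

config QUOTA_NETLINK_INTERFACE
	bool "Report quota messages through netlink interface"
	depends on QUOTA && NET

config PRINT_QUOTA_WARNING
	bool "Print quota warnings to console (OBSOLETE)"
	depends on QUOTA
	default y

# Generic support for tree-structured quota files. Selected when needed.
config QUOTA_TREE
	tristate

config QFMT_V1
	tristate "Old quota format support"
	depends on QUOTA

config QFMT_V2
	tristate "Quota format v2 support"
	depends on QUOTA
	select QUOTA_TREE

config QUOTACTL
	bool
	depends on XFS_QUOTA || QUOTA
	default y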
index dc20db3..6e82a30 100644 (file)
@@ -51,11 +51,7 @@ obj-$(CONFIG_FS_POSIX_ACL)   += posix_acl.o xattr_acl.o
 obj-$(CONFIG_NFS_COMMON)       += nfs_common/
 obj-$(CONFIG_GENERIC_ACL)      += generic_acl.o
 
-obj-$(CONFIG_QUOTA)            += dquot.o
-obj-$(CONFIG_QFMT_V1)          += quota_v1.o
-obj-$(CONFIG_QFMT_V2)          += quota_v2.o
-obj-$(CONFIG_QUOTA_TREE)       += quota_tree.o
-obj-$(CONFIG_QUOTACTL)         += quota.o
+obj-y                          += quota/
 
 obj-$(CONFIG_PROC_FS)          += proc/
 obj-y                          += partitions/
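The unconditional obj-y += quota/ works because kbuild now always descends into fs/quota/, while each object remains gated by its config symbol in the subdirectory. A sketch of the new fs/quota/Makefile, reconstructed from the rules deleted above:

# fs/quota/Makefile -- sketch reconstructed from the rules removed above
obj-$(CONFIG_QUOTA)		+= dquot.o
obj-$(CONFIG_QFMT_V1)		+= quota_v1.o
obj-$(CONFIG_QFMT_V2)		+= quota_v2.o
obj-$(CONFIG_QUOTA_TREE)	+= quota_tree.o
obj-$(CONFIG_QUOTACTL)		+= quota.o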
index f436019..9fe1b1b 100644 (file)
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -173,7 +173,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
                if (!error) {
                        if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
                            (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
-                               error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+                               error = vfs_dq_transfer(inode, attr) ?
+                                       -EDQUOT : 0;
                        if (!error)
                                error = inode_setattr(inode, attr);
                }
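The DQUOT_TRANSFER() to vfs_dq_transfer() change is purely a rename; a non-zero return still means the transfer failed and the caller reports -EDQUOT. For context, a hedged sketch of how a filesystem's own ->setattr typically hands a uid/gid change to the quota code after this series (myfs_setattr is illustrative, not taken from this patch):

#include <linux/fs.h>
#include <linux/quotaops.h>

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error = inode_change_ok(inode, attr);

	if (error)
		return error;
	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		/* Move the inode's charged usage to the new owner's dquots;
		 * non-zero means the new owner is over quota. */
		if (vfs_dq_transfer(inode, attr))
			return -EDQUOT;
	}
	return inode_setattr(inode, attr);
}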
diff --git a/fs/dquot.c b/fs/dquot.c
deleted file mode 100644 (file)
index d6add0b..0000000
+++ /dev/null
@@ -1,2407 +0,0 @@
-/*
- * Implementation of the diskquota system for the LINUX operating system. QUOTA
- * is implemented using the BSD system call interface as the means of
- * communication with the user level. This file contains the generic routines
- * called by the different filesystems on allocation of an inode or block.
- * These routines take care of the administration needed to have a consistent
- * diskquota tracking system. The ideas of both user and group quotas are based
- * on the Melbourne quota system as used on BSD derived systems. The internal
- * implementation is based on one of the several variants of the LINUX
- * inode-subsystem with added complexity of the diskquota system.
- * 
- * Author:     Marco van Wieringen <mvw@planets.elm.net>
- *
- * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
- *
- *             Revised list management to avoid races
- *             -- Bill Hawes, <whawes@star.net>, 9/98
- *
- *             Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
- *             As the consequence the locking was moved from dquot_decr_...(),
- *             dquot_incr_...() to calling functions.
- *             invalidate_dquots() now writes modified dquots.
- *             Serialized quota_off() and quota_on() for mount point.
- *             Fixed a few bugs in grow_dquots().
- *             Fixed deadlock in write_dquot() - we no longer account quotas on
- *             quota files
- *             remove_dquot_ref() moved to inode.c - it now traverses through inodes
- *             add_dquot_ref() restarts after blocking
- *             Added check for bogus uid and fixed check for group in quotactl.
- *             Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
- *
- *             Used struct list_head instead of own list struct
- *             Invalidation of referenced dquots is no longer possible
- *             Improved free_dquots list management
- *             Quota and i_blocks are now updated in one place to avoid races
- *             Warnings are now delayed so we won't block in critical section
- *             Write updated not to require dquot lock
- *             Jan Kara, <jack@suse.cz>, 9/2000
- *
- *             Added dynamic quota structure allocation
- *             Jan Kara <jack@suse.cz> 12/2000
- *
- *             Rewritten quota interface. Implemented new quota format and
- *             formats registering.
- *             Jan Kara, <jack@suse.cz>, 2001,2002
- *
- *             New SMP locking.
- *             Jan Kara, <jack@suse.cz>, 10/2002
- *
- *             Added journalled quota support, fix lock inversion problems
- *             Jan Kara, <jack@suse.cz>, 2003,2004
- *
- * (C) Copyright 1994 - 1997 Marco van Wieringen 
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/mm.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/stat.h>
-#include <linux/tty.h>
-#include <linux/file.h>
-#include <linux/slab.h>
-#include <linux/sysctl.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/security.h>
-#include <linux/kmod.h>
-#include <linux/namei.h>
-#include <linux/buffer_head.h>
-#include <linux/capability.h>
-#include <linux/quotaops.h>
-#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-#include <net/netlink.h>
-#include <net/genetlink.h>
-#endif
-
-#include <asm/uaccess.h>
-
-#define __DQUOT_PARANOIA
-
-/*
- * There are three quota SMP locks. dq_list_lock protects all lists with quotas
- * and quota formats, dqstats structure containing statistics about the lists
- * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
- * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
- * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
- * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
- * modifications of quota state (on quotaon and quotaoff) and readers who care
- * about latest values take it as well.
- *
- * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
- *   dq_list_lock > dq_state_lock
- *
- * Note that some things (e.g. sb pointer, type, id) don't change during
- * the life of the dquot structure and so need not be protected by a lock
- *
- * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
- * operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock
- * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
- * for altering the flag i_mutex is also needed).
- *
- * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
- * from inodes (dquot_alloc_space() and such don't check the dq_lock).
- * Currently dquot is locked only when it is being read to memory (or space for
- * it is being allocated) on the first dqget() and when it is being released on
- * the last dqput(). The allocation and release operations are serialized by
- * the dq_lock and by checking the use count in dquot_release().  Write
- * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
- * spinlock to internal buffers before writing.
- *
- * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
- *   dqio_mutex
- * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
- * dqptr_sem. But the filesystem has to take into account that functions such as
- * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
- * from inside a transaction to keep filesystem consistency after a crash. Also
- * filesystems usually want to do some IO on dquot from ->mark_dirty which is
- * called with dqptr_sem held.
- * i_mutex on quota files is special (it's below dqio_mutex)
- */
-
-static DEFINE_SPINLOCK(dq_list_lock);
-static DEFINE_SPINLOCK(dq_state_lock);
-DEFINE_SPINLOCK(dq_data_lock);
-
-static char *quotatypes[] = INITQFNAMES;
-static struct quota_format_type *quota_formats;        /* List of registered formats */
-static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
-
-/* SLAB cache for dquot structures */
-static struct kmem_cache *dquot_cachep;
-
-int register_quota_format(struct quota_format_type *fmt)
-{
-       spin_lock(&dq_list_lock);
-       fmt->qf_next = quota_formats;
-       quota_formats = fmt;
-       spin_unlock(&dq_list_lock);
-       return 0;
-}
-
-void unregister_quota_format(struct quota_format_type *fmt)
-{
-       struct quota_format_type **actqf;
-
-       spin_lock(&dq_list_lock);
-       for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
-       if (*actqf)
-               *actqf = (*actqf)->qf_next;
-       spin_unlock(&dq_list_lock);
-}
-
-static struct quota_format_type *find_quota_format(int id)
-{
-       struct quota_format_type *actqf;
-
-       spin_lock(&dq_list_lock);
-       for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
-       if (!actqf || !try_module_get(actqf->qf_owner)) {
-               int qm;
-
-               spin_unlock(&dq_list_lock);
-               
-               for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
-               if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
-                       return NULL;
-
-               spin_lock(&dq_list_lock);
-               for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
-               if (actqf && !try_module_get(actqf->qf_owner))
-                       actqf = NULL;
-       }
-       spin_unlock(&dq_list_lock);
-       return actqf;
-}
-
-static void put_quota_format(struct quota_format_type *fmt)
-{
-       module_put(fmt->qf_owner);
-}
-
-/*
- * Dquot List Management:
- * The quota code uses three lists for dquot management: the inuse_list,
- * free_dquots, and dquot_hash[] array. A single dquot structure may be
- * on all three lists, depending on its current state.
- *
- * All dquots are placed to the end of inuse_list when first created, and this
- * list is used for invalidate operation, which must look at every dquot.
- *
- * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
- * and this list is searched whenever we need an available dquot.  Dquots are
- * removed from the list as soon as they are used again, and
- * dqstats.free_dquots gives the number of dquots on the list. When
- * dquot is invalidated it's completely released from memory.
- *
- * Dquots with a specific identity (device, type and id) are placed on
- * one of the dquot_hash[] hash chains. This provides an efficient search
- * mechanism to locate a specific dquot.
- */
-
-static LIST_HEAD(inuse_list);
-static LIST_HEAD(free_dquots);
-static unsigned int dq_hash_bits, dq_hash_mask;
-static struct hlist_head *dquot_hash;
-
-struct dqstats dqstats;
-
-static inline unsigned int
-hashfn(const struct super_block *sb, unsigned int id, int type)
-{
-       unsigned long tmp;
-
-       tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
-       return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
-}
-
-/*
- * Following list functions expect dq_list_lock to be held
- */
-static inline void insert_dquot_hash(struct dquot *dquot)
-{
-       struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
-       hlist_add_head(&dquot->dq_hash, head);
-}
-
-static inline void remove_dquot_hash(struct dquot *dquot)
-{
-       hlist_del_init(&dquot->dq_hash);
-}
-
-static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
-{
-       struct hlist_node *node;
-       struct dquot *dquot;
-
-       hlist_for_each (node, dquot_hash+hashent) {
-               dquot = hlist_entry(node, struct dquot, dq_hash);
-               if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
-                       return dquot;
-       }
-       return NODQUOT;
-}
-
-/* Add a dquot to the tail of the free list */
-static inline void put_dquot_last(struct dquot *dquot)
-{
-       list_add_tail(&dquot->dq_free, &free_dquots);
-       dqstats.free_dquots++;
-}
-
-static inline void remove_free_dquot(struct dquot *dquot)
-{
-       if (list_empty(&dquot->dq_free))
-               return;
-       list_del_init(&dquot->dq_free);
-       dqstats.free_dquots--;
-}
-
-static inline void put_inuse(struct dquot *dquot)
-{
-       /* We add to the back of the inuse list so we don't have to restart
-        * when we block while traversing this list */
-       list_add_tail(&dquot->dq_inuse, &inuse_list);
-       dqstats.allocated_dquots++;
-}
-
-static inline void remove_inuse(struct dquot *dquot)
-{
-       dqstats.allocated_dquots--;
-       list_del(&dquot->dq_inuse);
-}
-/*
- * End of list functions needing dq_list_lock
- */
-
-static void wait_on_dquot(struct dquot *dquot)
-{
-       mutex_lock(&dquot->dq_lock);
-       mutex_unlock(&dquot->dq_lock);
-}
-
-static inline int dquot_dirty(struct dquot *dquot)
-{
-       return test_bit(DQ_MOD_B, &dquot->dq_flags);
-}
-
-static inline int mark_dquot_dirty(struct dquot *dquot)
-{
-       return dquot->dq_sb->dq_op->mark_dirty(dquot);
-}
-
-int dquot_mark_dquot_dirty(struct dquot *dquot)
-{
-       spin_lock(&dq_list_lock);
-       if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
-               list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
-                               info[dquot->dq_type].dqi_dirty_list);
-       spin_unlock(&dq_list_lock);
-       return 0;
-}
-
-/* This function needs dq_list_lock */
-static inline int clear_dquot_dirty(struct dquot *dquot)
-{
-       if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
-               return 0;
-       list_del_init(&dquot->dq_dirty);
-       return 1;
-}
-
-void mark_info_dirty(struct super_block *sb, int type)
-{
-       set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
-}
-EXPORT_SYMBOL(mark_info_dirty);
-
-/*
- *     Read dquot from disk and alloc space for it
- */
-
-int dquot_acquire(struct dquot *dquot)
-{
-       int ret = 0, ret2 = 0;
-       struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
-
-       mutex_lock(&dquot->dq_lock);
-       mutex_lock(&dqopt->dqio_mutex);
-       if (!test_bit(DQ_READ_B, &dquot->dq_flags))
-               ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
-       if (ret < 0)
-               goto out_iolock;
-       set_bit(DQ_READ_B, &dquot->dq_flags);
-       /* Instantiate dquot if needed */
-       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
-               ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
-               /* Write the info if needed */
-               if (info_dirty(&dqopt->info[dquot->dq_type]))
-                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
-               if (ret < 0)
-                       goto out_iolock;
-               if (ret2 < 0) {
-                       ret = ret2;
-                       goto out_iolock;
-               }
-       }
-       set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
-out_iolock:
-       mutex_unlock(&dqopt->dqio_mutex);
-       mutex_unlock(&dquot->dq_lock);
-       return ret;
-}
-
-/*
- *     Write dquot to disk
- */
-int dquot_commit(struct dquot *dquot)
-{
-       int ret = 0, ret2 = 0;
-       struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
-
-       mutex_lock(&dqopt->dqio_mutex);
-       spin_lock(&dq_list_lock);
-       if (!clear_dquot_dirty(dquot)) {
-               spin_unlock(&dq_list_lock);
-               goto out_sem;
-       }
-       spin_unlock(&dq_list_lock);
-       /* A dquot can be inactive only if there was an error during read/init
-        * => we had better not write it */
-       if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
-               ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
-               if (info_dirty(&dqopt->info[dquot->dq_type]))
-                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
-               if (ret >= 0)
-                       ret = ret2;
-       }
-out_sem:
-       mutex_unlock(&dqopt->dqio_mutex);
-       return ret;
-}
-
-/*
- *     Release dquot
- */
-int dquot_release(struct dquot *dquot)
-{
-       int ret = 0, ret2 = 0;
-       struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
-
-       mutex_lock(&dquot->dq_lock);
-       /* Check whether we are not racing with some other dqget() */
-       if (atomic_read(&dquot->dq_count) > 1)
-               goto out_dqlock;
-       mutex_lock(&dqopt->dqio_mutex);
-       if (dqopt->ops[dquot->dq_type]->release_dqblk) {
-               ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
-               /* Write the info */
-               if (info_dirty(&dqopt->info[dquot->dq_type]))
-                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
-               if (ret >= 0)
-                       ret = ret2;
-       }
-       clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
-       mutex_unlock(&dqopt->dqio_mutex);
-out_dqlock:
-       mutex_unlock(&dquot->dq_lock);
-       return ret;
-}
-
-void dquot_destroy(struct dquot *dquot)
-{
-       kmem_cache_free(dquot_cachep, dquot);
-}
-EXPORT_SYMBOL(dquot_destroy);
-
-static inline void do_destroy_dquot(struct dquot *dquot)
-{
-       dquot->dq_sb->dq_op->destroy_dquot(dquot);
-}
-
-/* Invalidate all dquots on the list. Note that this function is called after
- * quota is disabled and pointers from inodes removed so there cannot be new
- * quota users. There can still be some users of quotas due to inodes being
- * just deleted or pruned by prune_icache() (those are not attached to any
- * list) or parallel quotactl call. We have to wait for such users.
- */
-static void invalidate_dquots(struct super_block *sb, int type)
-{
-       struct dquot *dquot, *tmp;
-
-restart:
-       spin_lock(&dq_list_lock);
-       list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
-               if (dquot->dq_sb != sb)
-                       continue;
-               if (dquot->dq_type != type)
-                       continue;
-               /* Wait for dquot users */
-               if (atomic_read(&dquot->dq_count)) {
-                       DEFINE_WAIT(wait);
-
-                       atomic_inc(&dquot->dq_count);
-                       prepare_to_wait(&dquot->dq_wait_unused, &wait,
-                                       TASK_UNINTERRUPTIBLE);
-                       spin_unlock(&dq_list_lock);
-                       /* Once dqput() wakes us up, we know it's time to free
-                        * the dquot.
-                        * IMPORTANT: we rely on the fact that there is always
-                        * at most one process waiting for dquot to free.
-                        * Otherwise dq_count would be > 1 and we would never
-                        * wake up.
-                        */
-                       if (atomic_read(&dquot->dq_count) > 1)
-                               schedule();
-                       finish_wait(&dquot->dq_wait_unused, &wait);
-                       dqput(dquot);
-                       /* At this moment the dquot need not exist (it could
-                        * have been reclaimed by prune_dqcache()). Hence we
-                        * must restart. */
-                       goto restart;
-               }
-               /*
-                * Quota now has no users and it has been written on last
-                * dqput()
-                */
-               remove_dquot_hash(dquot);
-               remove_free_dquot(dquot);
-               remove_inuse(dquot);
-               do_destroy_dquot(dquot);
-       }
-       spin_unlock(&dq_list_lock);
-}
-
-/* Call callback for every active dquot on given filesystem */
-int dquot_scan_active(struct super_block *sb,
-                     int (*fn)(struct dquot *dquot, unsigned long priv),
-                     unsigned long priv)
-{
-       struct dquot *dquot, *old_dquot = NULL;
-       int ret = 0;
-
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
-       spin_lock(&dq_list_lock);
-       list_for_each_entry(dquot, &inuse_list, dq_inuse) {
-               if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
-                       continue;
-               if (dquot->dq_sb != sb)
-                       continue;
-               /* Now we have active dquot so we can just increase use count */
-               atomic_inc(&dquot->dq_count);
-               dqstats.lookups++;
-               spin_unlock(&dq_list_lock);
-               dqput(old_dquot);
-               old_dquot = dquot;
-               ret = fn(dquot, priv);
-               if (ret < 0)
-                       goto out;
-               spin_lock(&dq_list_lock);
-               /* We are safe to continue now because our dquot could not
-                * be moved out of the inuse list while we hold the reference */
-       }
-       spin_unlock(&dq_list_lock);
-out:
-       dqput(old_dquot);
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-       return ret;
-}
-
-int vfs_quota_sync(struct super_block *sb, int type)
-{
-       struct list_head *dirty;
-       struct dquot *dquot;
-       struct quota_info *dqopt = sb_dqopt(sb);
-       int cnt;
-
-       mutex_lock(&dqopt->dqonoff_mutex);
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (type != -1 && cnt != type)
-                       continue;
-               if (!sb_has_quota_active(sb, cnt))
-                       continue;
-               spin_lock(&dq_list_lock);
-               dirty = &dqopt->info[cnt].dqi_dirty_list;
-               while (!list_empty(dirty)) {
-                       dquot = list_first_entry(dirty, struct dquot, dq_dirty);
-                       /* Only a bad dquot can be both dirty and inactive... */
-                       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
-                               clear_dquot_dirty(dquot);
-                               continue;
-                       }
-                       /* Now we have active dquot from which someone is
-                        * holding reference so we can safely just increase
-                        * use count */
-                       atomic_inc(&dquot->dq_count);
-                       dqstats.lookups++;
-                       spin_unlock(&dq_list_lock);
-                       sb->dq_op->write_dquot(dquot);
-                       dqput(dquot);
-                       spin_lock(&dq_list_lock);
-               }
-               spin_unlock(&dq_list_lock);
-       }
-
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
-                   && info_dirty(&dqopt->info[cnt]))
-                       sb->dq_op->write_info(sb, cnt);
-       spin_lock(&dq_list_lock);
-       dqstats.syncs++;
-       spin_unlock(&dq_list_lock);
-       mutex_unlock(&dqopt->dqonoff_mutex);
-
-       return 0;
-}
-
-/* Free unused dquots from cache */
-static void prune_dqcache(int count)
-{
-       struct list_head *head;
-       struct dquot *dquot;
-
-       head = free_dquots.prev;
-       while (head != &free_dquots && count) {
-               dquot = list_entry(head, struct dquot, dq_free);
-               remove_dquot_hash(dquot);
-               remove_free_dquot(dquot);
-               remove_inuse(dquot);
-               do_destroy_dquot(dquot);
-               count--;
-               head = free_dquots.prev;
-       }
-}
-
-/*
- * This is called from kswapd when we think we need some
- * more memory
- */
-
-static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
-{
-       if (nr) {
-               spin_lock(&dq_list_lock);
-               prune_dqcache(nr);
-               spin_unlock(&dq_list_lock);
-       }
-       return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
-}
-
-static struct shrinker dqcache_shrinker = {
-       .shrink = shrink_dqcache_memory,
-       .seeks = DEFAULT_SEEKS,
-};
-
-/*
- * Put reference to dquot
- * NOTE: If you change this function please check whether dqput_blocks() works right...
- */
-void dqput(struct dquot *dquot)
-{
-       int ret;
-
-       if (!dquot)
-               return;
-#ifdef __DQUOT_PARANOIA
-       if (!atomic_read(&dquot->dq_count)) {
-               printk("VFS: dqput: trying to free free dquot\n");
-               printk("VFS: device %s, dquot of %s %d\n",
-                       dquot->dq_sb->s_id,
-                       quotatypes[dquot->dq_type],
-                       dquot->dq_id);
-               BUG();
-       }
-#endif
-       
-       spin_lock(&dq_list_lock);
-       dqstats.drops++;
-       spin_unlock(&dq_list_lock);
-we_slept:
-       spin_lock(&dq_list_lock);
-       if (atomic_read(&dquot->dq_count) > 1) {
-               /* We have more than one user... nothing to do */
-               atomic_dec(&dquot->dq_count);
-               /* Releasing dquot during quotaoff phase? */
-               if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
-                   atomic_read(&dquot->dq_count) == 1)
-                       wake_up(&dquot->dq_wait_unused);
-               spin_unlock(&dq_list_lock);
-               return;
-       }
-       /* Need to release dquot? */
-       if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
-               spin_unlock(&dq_list_lock);
-               /* Commit dquot before releasing */
-               ret = dquot->dq_sb->dq_op->write_dquot(dquot);
-               if (ret < 0) {
-                       printk(KERN_ERR "VFS: cannot write quota structure on "
-                               "device %s (error %d). Quota may get out of "
-                               "sync!\n", dquot->dq_sb->s_id, ret);
-                       /*
-                        * We clear dirty bit anyway, so that we avoid
-                        * infinite loop here
-                        */
-                       spin_lock(&dq_list_lock);
-                       clear_dquot_dirty(dquot);
-                       spin_unlock(&dq_list_lock);
-               }
-               goto we_slept;
-       }
-       /* Clear flag in case dquot was inactive (something bad happened) */
-       clear_dquot_dirty(dquot);
-       if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
-               spin_unlock(&dq_list_lock);
-               dquot->dq_sb->dq_op->release_dquot(dquot);
-               goto we_slept;
-       }
-       atomic_dec(&dquot->dq_count);
-#ifdef __DQUOT_PARANOIA
-       /* sanity check */
-       BUG_ON(!list_empty(&dquot->dq_free));
-#endif
-       put_dquot_last(dquot);
-       spin_unlock(&dq_list_lock);
-}
-
-struct dquot *dquot_alloc(struct super_block *sb, int type)
-{
-       return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
-}
-EXPORT_SYMBOL(dquot_alloc);
-
-static struct dquot *get_empty_dquot(struct super_block *sb, int type)
-{
-       struct dquot *dquot;
-
-       dquot = sb->dq_op->alloc_dquot(sb, type);
-       if(!dquot)
-               return NODQUOT;
-
-       mutex_init(&dquot->dq_lock);
-       INIT_LIST_HEAD(&dquot->dq_free);
-       INIT_LIST_HEAD(&dquot->dq_inuse);
-       INIT_HLIST_NODE(&dquot->dq_hash);
-       INIT_LIST_HEAD(&dquot->dq_dirty);
-       init_waitqueue_head(&dquot->dq_wait_unused);
-       dquot->dq_sb = sb;
-       dquot->dq_type = type;
-       atomic_set(&dquot->dq_count, 1);
-
-       return dquot;
-}
-
-/*
- * Get reference to dquot
- *
- * Locking is slightly tricky here. We are guarded from parallel quotaoff()
- * destroying our dquot by:
- *   a) checking for quota flags under dq_list_lock and
- *   b) getting a reference to dquot before we release dq_list_lock
- */
-struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
-{
-       unsigned int hashent = hashfn(sb, id, type);
-       struct dquot *dquot = NODQUOT, *empty = NODQUOT;
-
-        if (!sb_has_quota_active(sb, type))
-               return NODQUOT;
-we_slept:
-       spin_lock(&dq_list_lock);
-       spin_lock(&dq_state_lock);
-       if (!sb_has_quota_active(sb, type)) {
-               spin_unlock(&dq_state_lock);
-               spin_unlock(&dq_list_lock);
-               goto out;
-       }
-       spin_unlock(&dq_state_lock);
-
-       if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
-               if (empty == NODQUOT) {
-                       spin_unlock(&dq_list_lock);
-                       if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
-                               schedule();     /* Try to wait for a moment... */
-                       goto we_slept;
-               }
-               dquot = empty;
-               empty = NODQUOT;
-               dquot->dq_id = id;
-               /* all dquots go on the inuse_list */
-               put_inuse(dquot);
-               /* hash it first so it can be found */
-               insert_dquot_hash(dquot);
-               dqstats.lookups++;
-               spin_unlock(&dq_list_lock);
-       } else {
-               if (!atomic_read(&dquot->dq_count))
-                       remove_free_dquot(dquot);
-               atomic_inc(&dquot->dq_count);
-               dqstats.cache_hits++;
-               dqstats.lookups++;
-               spin_unlock(&dq_list_lock);
-       }
-       /* Wait for dq_lock - after this we know that either dquot_release() is already
-        * finished or it will be canceled due to dq_count > 1 test */
-       wait_on_dquot(dquot);
-       /* Read the dquot and instantiate it (everything done only if needed) */
-       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
-               dqput(dquot);
-               dquot = NODQUOT;
-               goto out;
-       }
-#ifdef __DQUOT_PARANOIA
-       BUG_ON(!dquot->dq_sb);  /* Has somebody invalidated entry under us? */
-#endif
-out:
-       if (empty)
-               do_destroy_dquot(empty);
-
-       return dquot;
-}
-
-static int dqinit_needed(struct inode *inode, int type)
-{
-       int cnt;
-
-       if (IS_NOQUOTA(inode))
-               return 0;
-       if (type != -1)
-               return inode->i_dquot[type] == NODQUOT;
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               if (inode->i_dquot[cnt] == NODQUOT)
-                       return 1;
-       return 0;
-}
-
-/* This routine is guarded by dqonoff_mutex mutex */
-static void add_dquot_ref(struct super_block *sb, int type)
-{
-       struct inode *inode, *old_inode = NULL;
-
-       spin_lock(&inode_lock);
-       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
-               if (!atomic_read(&inode->i_writecount))
-                       continue;
-               if (!dqinit_needed(inode, type))
-                       continue;
-               if (inode->i_state & (I_FREEING|I_WILL_FREE))
-                       continue;
-
-               __iget(inode);
-               spin_unlock(&inode_lock);
-
-               iput(old_inode);
-               sb->dq_op->initialize(inode, type);
-               /* We hold a reference to 'inode' so it couldn't have been
-                * removed from s_inodes list while we dropped the inode_lock.
-                * We cannot iput the inode now as we can be holding the last
-                * reference and we cannot iput it under inode_lock. So we
-                * keep the reference and iput it later. */
-               old_inode = inode;
-               spin_lock(&inode_lock);
-       }
-       spin_unlock(&inode_lock);
-       iput(old_inode);
-}
-
-/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
-static inline int dqput_blocks(struct dquot *dquot)
-{
-       if (atomic_read(&dquot->dq_count) <= 1)
-               return 1;
-       return 0;
-}
-
-/* Remove references to dquots from inode - add dquot to list for freeing if needed */
-/* We can't race with anybody because we hold dqptr_sem for writing... */
-static int remove_inode_dquot_ref(struct inode *inode, int type,
-                                 struct list_head *tofree_head)
-{
-       struct dquot *dquot = inode->i_dquot[type];
-
-       inode->i_dquot[type] = NODQUOT;
-       if (dquot != NODQUOT) {
-               if (dqput_blocks(dquot)) {
-#ifdef __DQUOT_PARANOIA
-                       if (atomic_read(&dquot->dq_count) != 1)
-                               printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
-#endif
-                       spin_lock(&dq_list_lock);
-                       list_add(&dquot->dq_free, tofree_head); /* As the dquot must currently have users it can't be on the free list... */
-                       spin_unlock(&dq_list_lock);
-                       return 1;
-               }
-               else
-                       dqput(dquot);   /* We have guaranteed we won't block */
-       }
-       return 0;
-}
-
-/* Free list of dquots - called from inode.c */
-/* dquots are removed from inodes; no new references can be taken so we are the only ones holding a reference */
-static void put_dquot_list(struct list_head *tofree_head)
-{
-       struct list_head *act_head;
-       struct dquot *dquot;
-
-       act_head = tofree_head->next;
-       /* So now we have dquots on the list... Just free them */
-       while (act_head != tofree_head) {
-               dquot = list_entry(act_head, struct dquot, dq_free);
-               act_head = act_head->next;
-               list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */
-               dqput(dquot);
-       }
-}
-
-static void remove_dquot_ref(struct super_block *sb, int type,
-               struct list_head *tofree_head)
-{
-       struct inode *inode;
-
-       spin_lock(&inode_lock);
-       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
-               if (!IS_NOQUOTA(inode))
-                       remove_inode_dquot_ref(inode, type, tofree_head);
-       }
-       spin_unlock(&inode_lock);
-}
-
-/* Gather all references from inodes and drop them */
-static void drop_dquot_ref(struct super_block *sb, int type)
-{
-       LIST_HEAD(tofree_head);
-
-       if (sb->dq_op) {
-               down_write(&sb_dqopt(sb)->dqptr_sem);
-               remove_dquot_ref(sb, type, &tofree_head);
-               up_write(&sb_dqopt(sb)->dqptr_sem);
-               put_dquot_list(&tofree_head);
-       }
-}
-
-static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
-{
-       dquot->dq_dqb.dqb_curinodes += number;
-}
-
-static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
-{
-       dquot->dq_dqb.dqb_curspace += number;
-}
-
-static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
-{
-       if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
-           dquot->dq_dqb.dqb_curinodes >= number)
-               dquot->dq_dqb.dqb_curinodes -= number;
-       else
-               dquot->dq_dqb.dqb_curinodes = 0;
-       if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
-               dquot->dq_dqb.dqb_itime = (time_t) 0;
-       clear_bit(DQ_INODES_B, &dquot->dq_flags);
-}
-
-static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
-{
-       if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
-           dquot->dq_dqb.dqb_curspace >= number)
-               dquot->dq_dqb.dqb_curspace -= number;
-       else
-               dquot->dq_dqb.dqb_curspace = 0;
-       if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
-               dquot->dq_dqb.dqb_btime = (time_t) 0;
-       clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-}
-
-static int warning_issued(struct dquot *dquot, const int warntype)
-{
-       int flag = (warntype == QUOTA_NL_BHARDWARN ||
-               warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
-               ((warntype == QUOTA_NL_IHARDWARN ||
-               warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
-
-       if (!flag)
-               return 0;
-       return test_and_set_bit(flag, &dquot->dq_flags);
-}
-
-#ifdef CONFIG_PRINT_QUOTA_WARNING
-static int flag_print_warnings = 1;
-
-static inline int need_print_warning(struct dquot *dquot)
-{
-       if (!flag_print_warnings)
-               return 0;
-
-       switch (dquot->dq_type) {
-               case USRQUOTA:
-                       return current_fsuid() == dquot->dq_id;
-               case GRPQUOTA:
-                       return in_group_p(dquot->dq_id);
-       }
-       return 0;
-}
-
-/* Print warning to user which exceeded quota */
-static void print_warning(struct dquot *dquot, const int warntype)
-{
-       char *msg = NULL;
-       struct tty_struct *tty;
-
-       if (warntype == QUOTA_NL_IHARDBELOW ||
-           warntype == QUOTA_NL_ISOFTBELOW ||
-           warntype == QUOTA_NL_BHARDBELOW ||
-           warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
-               return;
-
-       tty = get_current_tty();
-       if (!tty)
-               return;
-       tty_write_message(tty, dquot->dq_sb->s_id);
-       if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
-               tty_write_message(tty, ": warning, ");
-       else
-               tty_write_message(tty, ": write failed, ");
-       tty_write_message(tty, quotatypes[dquot->dq_type]);
-       switch (warntype) {
-               case QUOTA_NL_IHARDWARN:
-                       msg = " file limit reached.\r\n";
-                       break;
-               case QUOTA_NL_ISOFTLONGWARN:
-                       msg = " file quota exceeded too long.\r\n";
-                       break;
-               case QUOTA_NL_ISOFTWARN:
-                       msg = " file quota exceeded.\r\n";
-                       break;
-               case QUOTA_NL_BHARDWARN:
-                       msg = " block limit reached.\r\n";
-                       break;
-               case QUOTA_NL_BSOFTLONGWARN:
-                       msg = " block quota exceeded too long.\r\n";
-                       break;
-               case QUOTA_NL_BSOFTWARN:
-                       msg = " block quota exceeded.\r\n";
-                       break;
-       }
-       tty_write_message(tty, msg);
-       tty_kref_put(tty);
-}
-#endif
-
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-
-/* Netlink family structure for quota */
-static struct genl_family quota_genl_family = {
-       .id = GENL_ID_GENERATE,
-       .hdrsize = 0,
-       .name = "VFS_DQUOT",
-       .version = 1,
-       .maxattr = QUOTA_NL_A_MAX,
-};
-
-/* Send warning to userspace about user which exceeded quota */
-static void send_warning(const struct dquot *dquot, const char warntype)
-{
-       static atomic_t seq;
-       struct sk_buff *skb;
-       void *msg_head;
-       int ret;
-       int msg_size = 4 * nla_total_size(sizeof(u32)) +
-                      2 * nla_total_size(sizeof(u64));
-
-       /* We have to allocate using GFP_NOFS as we are called from a
-        * filesystem performing write and thus further recursion into
-        * the fs to free some data could cause deadlocks. */
-       skb = genlmsg_new(msg_size, GFP_NOFS);
-       if (!skb) {
-               printk(KERN_ERR
-                 "VFS: Not enough memory to send quota warning.\n");
-               return;
-       }
-       msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
-                       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
-       if (!msg_head) {
-               printk(KERN_ERR
-                 "VFS: Cannot store netlink header in quota warning.\n");
-               goto err_out;
-       }
-       ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
-       if (ret)
-               goto attr_err_out;
-       ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
-       if (ret)
-               goto attr_err_out;
-       ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
-       if (ret)
-               goto attr_err_out;
-       ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
-               MAJOR(dquot->dq_sb->s_dev));
-       if (ret)
-               goto attr_err_out;
-       ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
-               MINOR(dquot->dq_sb->s_dev));
-       if (ret)
-               goto attr_err_out;
-       ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
-       if (ret)
-               goto attr_err_out;
-       genlmsg_end(skb, msg_head);
-
-       genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
-       return;
-attr_err_out:
-       printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
-err_out:
-       kfree_skb(skb);
-}
-#endif
-
-static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
-{
-       int i;
-
-       for (i = 0; i < MAXQUOTAS; i++)
-               if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN &&
-                   !warning_issued(dquots[i], warntype[i])) {
-#ifdef CONFIG_PRINT_QUOTA_WARNING
-                       print_warning(dquots[i], warntype[i]);
-#endif
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-                       send_warning(dquots[i], warntype[i]);
-#endif
-               }
-}
-
-static inline char ignore_hardlimit(struct dquot *dquot)
-{
-       struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
-
-       return capable(CAP_SYS_RESOURCE) &&
-           (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
-}
-
-/* needs dq_data_lock */
-static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
-{
-       *warntype = QUOTA_NL_NOWARN;
-       if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
-           test_bit(DQ_FAKE_B, &dquot->dq_flags))
-               return QUOTA_OK;
-
-       if (dquot->dq_dqb.dqb_ihardlimit &&
-          (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
-            !ignore_hardlimit(dquot)) {
-               *warntype = QUOTA_NL_IHARDWARN;
-               return NO_QUOTA;
-       }
-
-       if (dquot->dq_dqb.dqb_isoftlimit &&
-          (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
-           dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
-            !ignore_hardlimit(dquot)) {
-               *warntype = QUOTA_NL_ISOFTLONGWARN;
-               return NO_QUOTA;
-       }
-
-       if (dquot->dq_dqb.dqb_isoftlimit &&
-          (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
-           dquot->dq_dqb.dqb_itime == 0) {
-               *warntype = QUOTA_NL_ISOFTWARN;
-               dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
-       }
-
-       return QUOTA_OK;
-}
-
-/* needs dq_data_lock */
-static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
-{
-       *warntype = QUOTA_NL_NOWARN;
-       if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
-           test_bit(DQ_FAKE_B, &dquot->dq_flags))
-               return QUOTA_OK;
-
-       if (dquot->dq_dqb.dqb_bhardlimit &&
-           dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit &&
-            !ignore_hardlimit(dquot)) {
-               if (!prealloc)
-                       *warntype = QUOTA_NL_BHARDWARN;
-               return NO_QUOTA;
-       }
-
-       if (dquot->dq_dqb.dqb_bsoftlimit &&
-           dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
-           dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
-            !ignore_hardlimit(dquot)) {
-               if (!prealloc)
-                       *warntype = QUOTA_NL_BSOFTLONGWARN;
-               return NO_QUOTA;
-       }
-
-       if (dquot->dq_dqb.dqb_bsoftlimit &&
-           dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
-           dquot->dq_dqb.dqb_btime == 0) {
-               if (!prealloc) {
-                       *warntype = QUOTA_NL_BSOFTWARN;
-                       dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
-               }
-               else
-                       /*
-                        * We don't allow preallocation to exceed the softlimit so
-                        * exceeding will always be printed
-                        */
-                       return NO_QUOTA;
-       }
-
-       return QUOTA_OK;
-}
-
-static int info_idq_free(struct dquot *dquot, qsize_t inodes)
-{
-       if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
-           dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
-           !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
-               return QUOTA_NL_NOWARN;
-
-       if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit)
-               return QUOTA_NL_ISOFTBELOW;
-       if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
-           dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit)
-               return QUOTA_NL_IHARDBELOW;
-       return QUOTA_NL_NOWARN;
-}
-
-static int info_bdq_free(struct dquot *dquot, qsize_t space)
-{
-       if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
-           dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
-               return QUOTA_NL_NOWARN;
-
-       if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
-               return QUOTA_NL_BSOFTBELOW;
-       if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
-           dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
-               return QUOTA_NL_BHARDBELOW;
-       return QUOTA_NL_NOWARN;
-}
-/*
- *     Initialize quota pointers in inode
- *     We do things in a somewhat complicated way, but by that we avoid calling
- *     dqget() and thus filesystem callbacks under dqptr_sem.
- */
-int dquot_initialize(struct inode *inode, int type)
-{
-       unsigned int id = 0;
-       int cnt, ret = 0;
-       struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT };
-       struct super_block *sb = inode->i_sb;
-
-       /* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
-       if (IS_NOQUOTA(inode))
-               return 0;
-
-       /* First get references to structures we might need. */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (type != -1 && cnt != type)
-                       continue;
-               switch (cnt) {
-               case USRQUOTA:
-                       id = inode->i_uid;
-                       break;
-               case GRPQUOTA:
-                       id = inode->i_gid;
-                       break;
-               }
-               got[cnt] = dqget(sb, id, cnt);
-       }
-
-       down_write(&sb_dqopt(sb)->dqptr_sem);
-       /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
-       if (IS_NOQUOTA(inode))
-               goto out_err;
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (type != -1 && cnt != type)
-                       continue;
-               /* Avoid races with quotaoff() */
-               if (!sb_has_quota_active(sb, cnt))
-                       continue;
-               if (inode->i_dquot[cnt] == NODQUOT) {
-                       inode->i_dquot[cnt] = got[cnt];
-                       got[cnt] = NODQUOT;
-               }
-       }
-out_err:
-       up_write(&sb_dqopt(sb)->dqptr_sem);
-       /* Drop unused references */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               dqput(got[cnt]);
-       return ret;
-}
-
-/*
- *     Release all quotas referenced by inode
- */
-int dquot_drop(struct inode *inode)
-{
-       int cnt;
-       struct dquot *put[MAXQUOTAS];
-
-       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               put[cnt] = inode->i_dquot[cnt];
-               inode->i_dquot[cnt] = NODQUOT;
-       }
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               dqput(put[cnt]);
-       return 0;
-}
-
-/* Wrapper to remove references to quota structures from inode */
-void vfs_dq_drop(struct inode *inode)
-{
-       /* Here we can get arbitrary inode from clear_inode() so we have
-        * to be careful. OTOH we don't need locking as quota operations
-        * are allowed to change only at mount time */
-       if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
-           && inode->i_sb->dq_op->drop) {
-               int cnt;
-               /* Test before calling to rule out calls from proc and such
-                * where we are not allowed to block. Note that this is
-                * actually a reliable test even without the lock - the caller
-                * must ensure that nobody can come after the DQUOT_DROP and
-                * add quota pointers back anyway */
-               for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-                       if (inode->i_dquot[cnt] != NODQUOT)
-                               break;
-               if (cnt < MAXQUOTAS)
-                       inode->i_sb->dq_op->drop(inode);
-       }
-}
-
-/*
- * Following four functions update i_blocks+i_bytes fields and
- * quota information (together with appropriate checks)
- * NOTE: We absolutely rely on the fact that the caller dirties
- * the inode (usually macros in quotaops.h care about this) and
- * holds a handle for the current transaction so that dquot write and
- * inode write go into the same transaction.
- */
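A hedged sketch of that contract from the caller's side, for a journalling filesystem (handle_t, journal_start() and journal_stop() stand in for the filesystem's real transaction API; myfs_alloc_block() and myfs_do_allocation() are purely illustrative):

static int myfs_alloc_block(struct inode *inode, qsize_t bytes)
{
	/* Open the transaction before touching quota so the dquot write
	 * and the inode write end up in the same transaction. */
	handle_t *handle = journal_start(inode);
	int err;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* The quotaops.h wrapper charges the quota and dirties the inode. */
	if (vfs_dq_alloc_space(inode, bytes))
		err = -EDQUOT;
	else
		err = myfs_do_allocation(inode, bytes);
	journal_stop(handle);
	return err;
}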
-
-/*
- * This operation can block, but only after everything is updated
- */
-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
-{
-       int cnt, ret = NO_QUOTA;
-       char warntype[MAXQUOTAS];
-
-       /* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
-       if (IS_NOQUOTA(inode)) {
-out_add:
-               inode_add_bytes(inode, number);
-               return QUOTA_OK;
-       }
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               warntype[cnt] = QUOTA_NL_NOWARN;
-
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       if (IS_NOQUOTA(inode)) {        /* Now we can do reliable test... */
-               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-               goto out_add;
-       }
-       spin_lock(&dq_data_lock);
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (inode->i_dquot[cnt] == NODQUOT)
-                       continue;
-               if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
-                       goto warn_put_all;
-       }
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (inode->i_dquot[cnt] == NODQUOT)
-                       continue;
-               dquot_incr_space(inode->i_dquot[cnt], number);
-       }
-       inode_add_bytes(inode, number);
-       ret = QUOTA_OK;
-warn_put_all:
-       spin_unlock(&dq_data_lock);
-       if (ret == QUOTA_OK)
-               /* Dirtify all the dquots - this can block when journalling */
-               for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-                       if (inode->i_dquot[cnt])
-                               mark_dquot_dirty(inode->i_dquot[cnt]);
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       return ret;
-}
-
-/*
- * This operation can block, but only after everything is updated
- */
-int dquot_alloc_inode(const struct inode *inode, qsize_t number)
-{
-       int cnt, ret = NO_QUOTA;
-       char warntype[MAXQUOTAS];
-
-       /* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
-       if (IS_NOQUOTA(inode))
-               return QUOTA_OK;
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               warntype[cnt] = QUOTA_NL_NOWARN;
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       if (IS_NOQUOTA(inode)) {
-               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-               return QUOTA_OK;
-       }
-       spin_lock(&dq_data_lock);
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (inode->i_dquot[cnt] == NODQUOT)
-                       continue;
-               if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
-                       goto warn_put_all;
-       }
-
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (inode->i_dquot[cnt] == NODQUOT)
-                       continue;
-               dquot_incr_inodes(inode->i_dquot[cnt], number);
-       }
-       ret = QUOTA_OK;
-warn_put_all:
-       spin_unlock(&dq_data_lock);
-       if (ret == QUOTA_OK)
-               /* Dirtify all the dquots - this can block when journalling */
-               for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-                       if (inode->i_dquot[cnt])
-                               mark_dquot_dirty(inode->i_dquot[cnt]);
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       return ret;
-}
-
-/*
- * This operation can block, but only after everything is updated
- */
-int dquot_free_space(struct inode *inode, qsize_t number)
-{
-       unsigned int cnt;
-       char warntype[MAXQUOTAS];
-
-       /* First test before acquiring mutex - solves deadlocks when we
-        * re-enter the quota code and are already holding the mutex */
-       if (IS_NOQUOTA(inode)) {
-out_sub:
-               inode_sub_bytes(inode, number);
-               return QUOTA_OK;
-       }
-
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       /* Now recheck reliably when holding dqptr_sem */
-       if (IS_NOQUOTA(inode)) {
-               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-               goto out_sub;
-       }
-       spin_lock(&dq_data_lock);
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (inode->i_dquot[cnt] == NODQUOT)
-                       continue;
-               warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
-               dquot_decr_space(inode->i_dquot[cnt], number);
-       }
-       inode_sub_bytes(inode, number);
-       spin_unlock(&dq_data_lock);
-       /* Dirtify all the dquots - this can block when journalling */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               if (inode->i_dquot[cnt])
-                       mark_dquot_dirty(inode->i_dquot[cnt]);
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       return QUOTA_OK;
-}
-
-/*
- * This operation can block, but only after everything is updated
- */
-int dquot_free_inode(const struct inode *inode, qsize_t number)
-{
-       unsigned int cnt;
-       char warntype[MAXQUOTAS];
-
-       /* First test before acquiring mutex - solves deadlocks when we
-        * re-enter the quota code and are already holding the mutex */
-       if (IS_NOQUOTA(inode))
-               return QUOTA_OK;
-
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       /* Now recheck reliably when holding dqptr_sem */
-       if (IS_NOQUOTA(inode)) {
-               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-               return QUOTA_OK;
-       }
-       spin_lock(&dq_data_lock);
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (inode->i_dquot[cnt] == NODQUOT)
-                       continue;
-               warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
-               dquot_decr_inodes(inode->i_dquot[cnt], number);
-       }
-       spin_unlock(&dq_data_lock);
-       /* Dirtify all the dquots - this can block when journalling */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               if (inode->i_dquot[cnt])
-                       mark_dquot_dirty(inode->i_dquot[cnt]);
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       return QUOTA_OK;
-}
-
-/*
- * Transfer the number of inodes and blocks from one diskquota to another.
- *
- * This operation can block, but only after everything is updated
- * A transaction must be started when entering this function.
- */
-int dquot_transfer(struct inode *inode, struct iattr *iattr)
-{
-       qsize_t space;
-       struct dquot *transfer_from[MAXQUOTAS];
-       struct dquot *transfer_to[MAXQUOTAS];
-       int cnt, ret = QUOTA_OK;
-       int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
-           chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
-       char warntype_to[MAXQUOTAS];
-       char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
-
-       /* First test before acquiring mutex - solves deadlocks when we
-        * re-enter the quota code and are already holding the mutex */
-       if (IS_NOQUOTA(inode))
-               return QUOTA_OK;
-       /* Initialize the arrays */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               transfer_from[cnt] = NODQUOT;
-               transfer_to[cnt] = NODQUOT;
-               warntype_to[cnt] = QUOTA_NL_NOWARN;
-               switch (cnt) {
-                       case USRQUOTA:
-                               if (!chuid)
-                                       continue;
-                               transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
-                               break;
-                       case GRPQUOTA:
-                               if (!chgid)
-                                       continue;
-                               transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
-                               break;
-               }
-       }
-
-       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       /* Now recheck reliably when holding dqptr_sem */
-       if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
-               up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-               goto put_all;
-       }
-       spin_lock(&dq_data_lock);
-       space = inode_get_bytes(inode);
-       /* Build the transfer_from list and check the limits */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (transfer_to[cnt] == NODQUOT)
-                       continue;
-               transfer_from[cnt] = inode->i_dquot[cnt];
-               if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
-                   NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
-                   warntype_to + cnt) == NO_QUOTA)
-                       goto over_quota;
-       }
-
-       /*
-        * Finally perform the needed transfer from transfer_from to transfer_to
-        */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               /*
-                * Skip changes for the same uid or gid, or for a turned-off quota type.
-                */
-               if (transfer_to[cnt] == NODQUOT)
-                       continue;
-
-               /* Due to an IO error we might not have a transfer_from[] structure */
-               if (transfer_from[cnt]) {
-                       warntype_from_inodes[cnt] =
-                               info_idq_free(transfer_from[cnt], 1);
-                       warntype_from_space[cnt] =
-                               info_bdq_free(transfer_from[cnt], space);
-                       dquot_decr_inodes(transfer_from[cnt], 1);
-                       dquot_decr_space(transfer_from[cnt], space);
-               }
-
-               dquot_incr_inodes(transfer_to[cnt], 1);
-               dquot_incr_space(transfer_to[cnt], space);
-
-               inode->i_dquot[cnt] = transfer_to[cnt];
-       }
-       spin_unlock(&dq_data_lock);
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-
-       /* Dirtify all the dquots - this can block when journalling */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (transfer_from[cnt])
-                       mark_dquot_dirty(transfer_from[cnt]);
-               if (transfer_to[cnt]) {
-                       mark_dquot_dirty(transfer_to[cnt]);
-                       /* The reference we got is transferred to the inode */
-                       transfer_to[cnt] = NODQUOT;
-               }
-       }
-warn_put_all:
-       flush_warnings(transfer_to, warntype_to);
-       flush_warnings(transfer_from, warntype_from_inodes);
-       flush_warnings(transfer_from, warntype_from_space);
-put_all:
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               dqput(transfer_from[cnt]);
-               dqput(transfer_to[cnt]);
-       }
-       return ret;
-over_quota:
-       spin_unlock(&dq_data_lock);
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       /* Clear dquot pointers we don't want to dqput() */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               transfer_from[cnt] = NODQUOT;
-       ret = NO_QUOTA;
-       goto warn_put_all;
-}
-
-/* Wrapper for transferring ownership of an inode */
-int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
-{
-       if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
-               vfs_dq_init(inode);
-               if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
-                       return 1;
-       }
-       return 0;
-}
-
-
-/*
- * Write info of quota file to disk
- */
-int dquot_commit_info(struct super_block *sb, int type)
-{
-       int ret;
-       struct quota_info *dqopt = sb_dqopt(sb);
-
-       mutex_lock(&dqopt->dqio_mutex);
-       ret = dqopt->ops[type]->write_file_info(sb, type);
-       mutex_unlock(&dqopt->dqio_mutex);
-       return ret;
-}
-
-/*
- * Definitions of diskquota operations.
- */
-struct dquot_operations dquot_operations = {
-       .initialize     = dquot_initialize,
-       .drop           = dquot_drop,
-       .alloc_space    = dquot_alloc_space,
-       .alloc_inode    = dquot_alloc_inode,
-       .free_space     = dquot_free_space,
-       .free_inode     = dquot_free_inode,
-       .transfer       = dquot_transfer,
-       .write_dquot    = dquot_commit,
-       .acquire_dquot  = dquot_acquire,
-       .release_dquot  = dquot_release,
-       .mark_dirty     = dquot_mark_dquot_dirty,
-       .write_info     = dquot_commit_info,
-       .alloc_dquot    = dquot_alloc,
-       .destroy_dquot  = dquot_destroy,
-};
-
-/*
- * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
- */
-int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
-{
-       int cnt, ret = 0;
-       struct quota_info *dqopt = sb_dqopt(sb);
-       struct inode *toputinode[MAXQUOTAS];
-
-       /* Cannot turn off usage accounting without turning off limits, or
-        * suspend quotas and simultaneously turn quotas off. */
-       if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
-           || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
-           DQUOT_USAGE_ENABLED)))
-               return -EINVAL;
-
-       /* We need to serialize quota_off() for device */
-       mutex_lock(&dqopt->dqonoff_mutex);
-
-       /*
-        * Skip everything if there's nothing to do. We have to do this because
-        * sometimes we are called when fill_super() failed and calling
-        * sync_fs() in such cases does no good.
-        */
-       if (!sb_any_quota_loaded(sb)) {
-               mutex_unlock(&dqopt->dqonoff_mutex);
-               return 0;
-       }
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               toputinode[cnt] = NULL;
-               if (type != -1 && cnt != type)
-                       continue;
-               if (!sb_has_quota_loaded(sb, cnt))
-                       continue;
-
-               if (flags & DQUOT_SUSPENDED) {
-                       spin_lock(&dq_state_lock);
-                       dqopt->flags |=
-                               dquot_state_flag(DQUOT_SUSPENDED, cnt);
-                       spin_unlock(&dq_state_lock);
-               } else {
-                       spin_lock(&dq_state_lock);
-                       dqopt->flags &= ~dquot_state_flag(flags, cnt);
-                       /* Turning off suspended quotas? */
-                       if (!sb_has_quota_loaded(sb, cnt) &&
-                           sb_has_quota_suspended(sb, cnt)) {
-                               dqopt->flags &= ~dquot_state_flag(
-                                                       DQUOT_SUSPENDED, cnt);
-                               spin_unlock(&dq_state_lock);
-                               iput(dqopt->files[cnt]);
-                               dqopt->files[cnt] = NULL;
-                               continue;
-                       }
-                       spin_unlock(&dq_state_lock);
-               }
-
-               /* We still have to keep quota loaded? */
-               if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
-                       continue;
-
-               /* Note: these are blocking operations */
-               drop_dquot_ref(sb, cnt);
-               invalidate_dquots(sb, cnt);
-               /*
-                * Now all dquots should be invalidated and all writes done,
-                * so we should be the only users of the info. No locks needed.
-                */
-               if (info_dirty(&dqopt->info[cnt]))
-                       sb->dq_op->write_info(sb, cnt);
-               if (dqopt->ops[cnt]->free_file_info)
-                       dqopt->ops[cnt]->free_file_info(sb, cnt);
-               put_quota_format(dqopt->info[cnt].dqi_format);
-
-               toputinode[cnt] = dqopt->files[cnt];
-               if (!sb_has_quota_loaded(sb, cnt))
-                       dqopt->files[cnt] = NULL;
-               dqopt->info[cnt].dqi_flags = 0;
-               dqopt->info[cnt].dqi_igrace = 0;
-               dqopt->info[cnt].dqi_bgrace = 0;
-               dqopt->ops[cnt] = NULL;
-       }
-       mutex_unlock(&dqopt->dqonoff_mutex);
-
-       /* Skip syncing and setting flags if quota files are hidden */
-       if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
-               goto put_inodes;
-
-       /* Sync the superblock so that buffers with quota data are written to
-        * disk (and so userspace sees correct data afterwards). */
-       if (sb->s_op->sync_fs)
-               sb->s_op->sync_fs(sb, 1);
-       sync_blockdev(sb->s_bdev);
-       /* Now the quota files are just ordinary files and we can set the
-        * inode flags back. Moreover we discard the pagecache so that
-        * userspace sees the writes we did bypassing the pagecache. We
-        * must also discard the blockdev buffers so that we see the
-        * changes done by userspace on the next quotaon() */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               if (toputinode[cnt]) {
-                       mutex_lock(&dqopt->dqonoff_mutex);
-                       /* If quota was reenabled in the meantime, we have
-                        * nothing to do */
-                       if (!sb_has_quota_loaded(sb, cnt)) {
-                               mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
-                               toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
-                                 S_NOATIME | S_NOQUOTA);
-                               truncate_inode_pages(&toputinode[cnt]->i_data, 0);
-                               mutex_unlock(&toputinode[cnt]->i_mutex);
-                               mark_inode_dirty(toputinode[cnt]);
-                       }
-                       mutex_unlock(&dqopt->dqonoff_mutex);
-               }
-       if (sb->s_bdev)
-               invalidate_bdev(sb->s_bdev);
-put_inodes:
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               if (toputinode[cnt]) {
-                       /* On remount RO, we keep the inode pointer so that we
-                        * can reenable quota on the subsequent remount RW. We
-                        * have to check 'flags' variable and not use sb_has_
-                        * function because another quotaon / quotaoff could
-                        * change global state before we got here. We refuse
-                        * to suspend quotas when there is pending delete on
-                        * the quota file... */
-                       if (!(flags & DQUOT_SUSPENDED))
-                               iput(toputinode[cnt]);
-                       else if (!toputinode[cnt]->i_nlink)
-                               ret = -EBUSY;
-               }
-       return ret;
-}
-
-int vfs_quota_off(struct super_block *sb, int type, int remount)
-{
-       return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
-                                (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
-}
-
-/*
- *     Turn quotas on for a device
- */
-
-/*
- * Helper function to turn quotas on when we already have the inode of
- * quota file and no quota information is loaded.
- */
-static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
-       unsigned int flags)
-{
-       struct quota_format_type *fmt = find_quota_format(format_id);
-       struct super_block *sb = inode->i_sb;
-       struct quota_info *dqopt = sb_dqopt(sb);
-       int error;
-       int oldflags = -1;
-
-       if (!fmt)
-               return -ESRCH;
-       if (!S_ISREG(inode->i_mode)) {
-               error = -EACCES;
-               goto out_fmt;
-       }
-       if (IS_RDONLY(inode)) {
-               error = -EROFS;
-               goto out_fmt;
-       }
-       if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
-               error = -EINVAL;
-               goto out_fmt;
-       }
-       /* Usage always has to be set... */
-       if (!(flags & DQUOT_USAGE_ENABLED)) {
-               error = -EINVAL;
-               goto out_fmt;
-       }
-
-       if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
-               /* As we bypass the pagecache we must now flush the inode so
-                * that we see all the changes from userspace... */
-               write_inode_now(inode, 1);
-               /* And now flush the block cache so that kernel sees the
-                * changes */
-               invalidate_bdev(sb->s_bdev);
-       }
-       mutex_lock(&inode->i_mutex);
-       mutex_lock(&dqopt->dqonoff_mutex);
-       if (sb_has_quota_loaded(sb, type)) {
-               error = -EBUSY;
-               goto out_lock;
-       }
-
-       if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
-               /* We don't want quota and atime on quota files (deadlocks
-                * are possible). Also nobody should write to the file - we use
-                * special IO operations which ignore the immutable bit. */
-               down_write(&dqopt->dqptr_sem);
-               oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
-               inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
-               up_write(&dqopt->dqptr_sem);
-               sb->dq_op->drop(inode);
-       }
-
-       error = -EIO;
-       dqopt->files[type] = igrab(inode);
-       if (!dqopt->files[type])
-               goto out_lock;
-       error = -EINVAL;
-       if (!fmt->qf_ops->check_quota_file(sb, type))
-               goto out_file_init;
-
-       dqopt->ops[type] = fmt->qf_ops;
-       dqopt->info[type].dqi_format = fmt;
-       dqopt->info[type].dqi_fmt_id = format_id;
-       INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
-       mutex_lock(&dqopt->dqio_mutex);
-       if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
-               mutex_unlock(&dqopt->dqio_mutex);
-               goto out_file_init;
-       }
-       mutex_unlock(&dqopt->dqio_mutex);
-       mutex_unlock(&inode->i_mutex);
-       spin_lock(&dq_state_lock);
-       dqopt->flags |= dquot_state_flag(flags, type);
-       spin_unlock(&dq_state_lock);
-
-       add_dquot_ref(sb, type);
-       mutex_unlock(&dqopt->dqonoff_mutex);
-
-       return 0;
-
-out_file_init:
-       dqopt->files[type] = NULL;
-       iput(inode);
-out_lock:
-       mutex_unlock(&dqopt->dqonoff_mutex);
-       if (oldflags != -1) {
-               down_write(&dqopt->dqptr_sem);
-               /* Set the flags back (in case of an accidental quotaon()
-                * on the wrong file we don't want to mess up the flags) */
-               inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
-               inode->i_flags |= oldflags;
-               up_write(&dqopt->dqptr_sem);
-       }
-       mutex_unlock(&inode->i_mutex);
-out_fmt:
-       put_quota_format(fmt);
-
-       return error; 
-}
-
-/* Reenable quotas on remount RW */
-static int vfs_quota_on_remount(struct super_block *sb, int type)
-{
-       struct quota_info *dqopt = sb_dqopt(sb);
-       struct inode *inode;
-       int ret;
-       unsigned int flags;
-
-       mutex_lock(&dqopt->dqonoff_mutex);
-       if (!sb_has_quota_suspended(sb, type)) {
-               mutex_unlock(&dqopt->dqonoff_mutex);
-               return 0;
-       }
-       inode = dqopt->files[type];
-       dqopt->files[type] = NULL;
-       spin_lock(&dq_state_lock);
-       flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
-                                               DQUOT_LIMITS_ENABLED, type);
-       dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
-       spin_unlock(&dq_state_lock);
-       mutex_unlock(&dqopt->dqonoff_mutex);
-
-       flags = dquot_generic_flag(flags, type);
-       ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id,
-                                  flags);
-       iput(inode);
-
-       return ret;
-}
-
-int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
-                     struct path *path)
-{
-       int error = security_quota_on(path->dentry);
-       if (error)
-               return error;
-       /* Quota file not on the same filesystem? */
-       if (path->mnt->mnt_sb != sb)
-               error = -EXDEV;
-       else
-               error = vfs_load_quota_inode(path->dentry->d_inode, type,
-                                            format_id, DQUOT_USAGE_ENABLED |
-                                            DQUOT_LIMITS_ENABLED);
-       return error;
-}
-
-int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
-                int remount)
-{
-       struct path path;
-       int error;
-
-       if (remount)
-               return vfs_quota_on_remount(sb, type);
-
-       error = kern_path(name, LOOKUP_FOLLOW, &path);
-       if (!error) {
-               error = vfs_quota_on_path(sb, type, format_id, &path);
-               path_put(&path);
-       }
-       return error;
-}
-
-/*
- * More powerful function for turning on quotas allowing setting
- * of individual quota flags
- */
-int vfs_quota_enable(struct inode *inode, int type, int format_id,
-               unsigned int flags)
-{
-       int ret = 0;
-       struct super_block *sb = inode->i_sb;
-       struct quota_info *dqopt = sb_dqopt(sb);
-
-       /* Just unsuspend quotas? */
-       if (flags & DQUOT_SUSPENDED)
-               return vfs_quota_on_remount(sb, type);
-       if (!flags)
-               return 0;
-       /* Just updating flags needed? */
-       if (sb_has_quota_loaded(sb, type)) {
-               mutex_lock(&dqopt->dqonoff_mutex);
-               /* Now do a reliable test... */
-               if (!sb_has_quota_loaded(sb, type)) {
-                       mutex_unlock(&dqopt->dqonoff_mutex);
-                       goto load_quota;
-               }
-               if (flags & DQUOT_USAGE_ENABLED &&
-                   sb_has_quota_usage_enabled(sb, type)) {
-                       ret = -EBUSY;
-                       goto out_lock;
-               }
-               if (flags & DQUOT_LIMITS_ENABLED &&
-                   sb_has_quota_limits_enabled(sb, type)) {
-                       ret = -EBUSY;
-                       goto out_lock;
-               }
-               spin_lock(&dq_state_lock);
-               sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
-               spin_unlock(&dq_state_lock);
-out_lock:
-               mutex_unlock(&dqopt->dqonoff_mutex);
-               return ret;
-       }
-
-load_quota:
-       return vfs_load_quota_inode(inode, type, format_id, flags);
-}
-
-/*
- * This function is used when a filesystem needs to initialize quotas
- * at mount time.
- */
-int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
-               int format_id, int type)
-{
-       struct dentry *dentry;
-       int error;
-
-       dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
-       if (IS_ERR(dentry))
-               return PTR_ERR(dentry);
-
-       if (!dentry->d_inode) {
-               error = -ENOENT;
-               goto out;
-       }
-
-       error = security_quota_on(dentry);
-       if (!error)
-               error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
-                               DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-
-out:
-       dput(dentry);
-       return error;
-}
-
-/* Wrapper to turn on quotas when remounting rw */
-int vfs_dq_quota_on_remount(struct super_block *sb)
-{
-       int cnt;
-       int ret = 0, err;
-
-       if (!sb->s_qcop || !sb->s_qcop->quota_on)
-               return -ENOSYS;
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
-               if (err < 0 && !ret)
-                       ret = err;
-       }
-       return ret;
-}
-
-static inline qsize_t qbtos(qsize_t blocks)
-{
-       return blocks << QIF_DQBLKSIZE_BITS;
-}
-
-static inline qsize_t stoqb(qsize_t space)
-{
-       return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
-}
-
-/* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
-{
-       struct mem_dqblk *dm = &dquot->dq_dqb;
-
-       spin_lock(&dq_data_lock);
-       di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
-       di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
-       di->dqb_curspace = dm->dqb_curspace;
-       di->dqb_ihardlimit = dm->dqb_ihardlimit;
-       di->dqb_isoftlimit = dm->dqb_isoftlimit;
-       di->dqb_curinodes = dm->dqb_curinodes;
-       di->dqb_btime = dm->dqb_btime;
-       di->dqb_itime = dm->dqb_itime;
-       di->dqb_valid = QIF_ALL;
-       spin_unlock(&dq_data_lock);
-}
-
-int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
-{
-       struct dquot *dquot;
-
-       dquot = dqget(sb, id, type);
-       if (dquot == NODQUOT)
-               return -ESRCH;
-       do_get_dqblk(dquot, di);
-       dqput(dquot);
-
-       return 0;
-}
-
-/* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
-{
-       struct mem_dqblk *dm = &dquot->dq_dqb;
-       int check_blim = 0, check_ilim = 0;
-       struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
-
-       if ((di->dqb_valid & QIF_BLIMITS &&
-            (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
-             di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
-           (di->dqb_valid & QIF_ILIMITS &&
-            (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
-             di->dqb_isoftlimit > dqi->dqi_maxilimit)))
-               return -ERANGE;
-
-       spin_lock(&dq_data_lock);
-       if (di->dqb_valid & QIF_SPACE) {
-               dm->dqb_curspace = di->dqb_curspace;
-               check_blim = 1;
-               __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
-       }
-       if (di->dqb_valid & QIF_BLIMITS) {
-               dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
-               dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
-               check_blim = 1;
-               __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
-       }
-       if (di->dqb_valid & QIF_INODES) {
-               dm->dqb_curinodes = di->dqb_curinodes;
-               check_ilim = 1;
-               __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
-       }
-       if (di->dqb_valid & QIF_ILIMITS) {
-               dm->dqb_isoftlimit = di->dqb_isoftlimit;
-               dm->dqb_ihardlimit = di->dqb_ihardlimit;
-               check_ilim = 1;
-               __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
-       }
-       if (di->dqb_valid & QIF_BTIME) {
-               dm->dqb_btime = di->dqb_btime;
-               check_blim = 1;
-               __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
-       }
-       if (di->dqb_valid & QIF_ITIME) {
-               dm->dqb_itime = di->dqb_itime;
-               check_ilim = 1;
-               __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
-       }
-
-       if (check_blim) {
-               if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) {
-                       dm->dqb_btime = 0;
-                       clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-               }
-               else if (!(di->dqb_valid & QIF_BTIME))  /* Set grace only if user hasn't provided his own... */
-                       dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
-       }
-       if (check_ilim) {
-               if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
-                       dm->dqb_itime = 0;
-                       clear_bit(DQ_INODES_B, &dquot->dq_flags);
-               }
-               else if (!(di->dqb_valid & QIF_ITIME))  /* Set grace only if user hasn't provided his own... */
-                       dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
-       }
-       if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
-               clear_bit(DQ_FAKE_B, &dquot->dq_flags);
-       else
-               set_bit(DQ_FAKE_B, &dquot->dq_flags);
-       spin_unlock(&dq_data_lock);
-       mark_dquot_dirty(dquot);
-
-       return 0;
-}
-
-int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
-{
-       struct dquot *dquot;
-       int rc;
-
-       dquot = dqget(sb, id, type);
-       if (!dquot) {
-               rc = -ESRCH;
-               goto out;
-       }
-       rc = do_set_dqblk(dquot, di);
-       dqput(dquot);
-out:
-       return rc;
-}
-
-/* Generic routine for getting common part of quota file information */
-int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
-{
-       struct mem_dqinfo *mi;
-  
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
-       if (!sb_has_quota_active(sb, type)) {
-               mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-               return -ESRCH;
-       }
-       mi = sb_dqopt(sb)->info + type;
-       spin_lock(&dq_data_lock);
-       ii->dqi_bgrace = mi->dqi_bgrace;
-       ii->dqi_igrace = mi->dqi_igrace;
-       ii->dqi_flags = mi->dqi_flags & DQF_MASK;
-       ii->dqi_valid = IIF_ALL;
-       spin_unlock(&dq_data_lock);
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-       return 0;
-}
-
-/* Generic routine for setting common part of quota file information */
-int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
-{
-       struct mem_dqinfo *mi;
-       int err = 0;
-
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
-       if (!sb_has_quota_active(sb, type)) {
-               err = -ESRCH;
-               goto out;
-       }
-       mi = sb_dqopt(sb)->info + type;
-       spin_lock(&dq_data_lock);
-       if (ii->dqi_valid & IIF_BGRACE)
-               mi->dqi_bgrace = ii->dqi_bgrace;
-       if (ii->dqi_valid & IIF_IGRACE)
-               mi->dqi_igrace = ii->dqi_igrace;
-       if (ii->dqi_valid & IIF_FLAGS)
-               mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
-       spin_unlock(&dq_data_lock);
-       mark_info_dirty(sb, type);
-       /* Force write to disk */
-       sb->dq_op->write_info(sb, type);
-out:
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-       return err;
-}
-
-struct quotactl_ops vfs_quotactl_ops = {
-       .quota_on       = vfs_quota_on,
-       .quota_off      = vfs_quota_off,
-       .quota_sync     = vfs_quota_sync,
-       .get_info       = vfs_get_dqinfo,
-       .set_info       = vfs_set_dqinfo,
-       .get_dqblk      = vfs_get_dqblk,
-       .set_dqblk      = vfs_set_dqblk
-};
-
-static ctl_table fs_dqstats_table[] = {
-       {
-               .ctl_name       = FS_DQ_LOOKUPS,
-               .procname       = "lookups",
-               .data           = &dqstats.lookups,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = FS_DQ_DROPS,
-               .procname       = "drops",
-               .data           = &dqstats.drops,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = FS_DQ_READS,
-               .procname       = "reads",
-               .data           = &dqstats.reads,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = FS_DQ_WRITES,
-               .procname       = "writes",
-               .data           = &dqstats.writes,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = FS_DQ_CACHE_HITS,
-               .procname       = "cache_hits",
-               .data           = &dqstats.cache_hits,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = FS_DQ_ALLOCATED,
-               .procname       = "allocated_dquots",
-               .data           = &dqstats.allocated_dquots,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = FS_DQ_FREE,
-               .procname       = "free_dquots",
-               .data           = &dqstats.free_dquots,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = FS_DQ_SYNCS,
-               .procname       = "syncs",
-               .data           = &dqstats.syncs,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-#ifdef CONFIG_PRINT_QUOTA_WARNING
-       {
-               .ctl_name       = FS_DQ_WARNINGS,
-               .procname       = "warnings",
-               .data           = &flag_print_warnings,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-#endif
-       { .ctl_name = 0 },
-};
-
-static ctl_table fs_table[] = {
-       {
-               .ctl_name       = FS_DQSTATS,
-               .procname       = "quota",
-               .mode           = 0555,
-               .child          = fs_dqstats_table,
-       },
-       { .ctl_name = 0 },
-};
-
-static ctl_table sys_table[] = {
-       {
-               .ctl_name       = CTL_FS,
-               .procname       = "fs",
-               .mode           = 0555,
-               .child          = fs_table,
-       },
-       { .ctl_name = 0 },
-};
-
-static int __init dquot_init(void)
-{
-       int i;
-       unsigned long nr_hash, order;
-
-       printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
-
-       register_sysctl_table(sys_table);
-
-       dquot_cachep = kmem_cache_create("dquot",
-                       sizeof(struct dquot), sizeof(unsigned long) * 4,
-                       (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
-                               SLAB_MEM_SPREAD|SLAB_PANIC),
-                       NULL);
-
-       order = 0;
-       dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
-       if (!dquot_hash)
-               panic("Cannot create dquot hash table");
-
-       /* Find power-of-two hlist_heads which can fit into allocation */
-       nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
-       dq_hash_bits = 0;
-       do {
-               dq_hash_bits++;
-       } while (nr_hash >> dq_hash_bits);
-       dq_hash_bits--;
-
-       nr_hash = 1UL << dq_hash_bits;
-       dq_hash_mask = nr_hash - 1;
-       for (i = 0; i < nr_hash; i++)
-               INIT_HLIST_HEAD(dquot_hash + i);
-
-       printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
-                       nr_hash, order, (PAGE_SIZE << order));
-
-       register_shrinker(&dqcache_shrinker);
-
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-       if (genl_register_family(&quota_genl_family) != 0)
-               printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n");
-#endif
-
-       return 0;
-}
-module_init(dquot_init);
-
-EXPORT_SYMBOL(register_quota_format);
-EXPORT_SYMBOL(unregister_quota_format);
-EXPORT_SYMBOL(dqstats);
-EXPORT_SYMBOL(dq_data_lock);
-EXPORT_SYMBOL(vfs_quota_enable);
-EXPORT_SYMBOL(vfs_quota_on);
-EXPORT_SYMBOL(vfs_quota_on_path);
-EXPORT_SYMBOL(vfs_quota_on_mount);
-EXPORT_SYMBOL(vfs_quota_disable);
-EXPORT_SYMBOL(vfs_quota_off);
-EXPORT_SYMBOL(dquot_scan_active);
-EXPORT_SYMBOL(vfs_quota_sync);
-EXPORT_SYMBOL(vfs_get_dqinfo);
-EXPORT_SYMBOL(vfs_set_dqinfo);
-EXPORT_SYMBOL(vfs_get_dqblk);
-EXPORT_SYMBOL(vfs_set_dqblk);
-EXPORT_SYMBOL(dquot_commit);
-EXPORT_SYMBOL(dquot_commit_info);
-EXPORT_SYMBOL(dquot_acquire);
-EXPORT_SYMBOL(dquot_release);
-EXPORT_SYMBOL(dquot_mark_dquot_dirty);
-EXPORT_SYMBOL(dquot_initialize);
-EXPORT_SYMBOL(dquot_drop);
-EXPORT_SYMBOL(vfs_dq_drop);
-EXPORT_SYMBOL(dqget);
-EXPORT_SYMBOL(dqput);
-EXPORT_SYMBOL(dquot_alloc_space);
-EXPORT_SYMBOL(dquot_alloc_inode);
-EXPORT_SYMBOL(dquot_free_space);
-EXPORT_SYMBOL(dquot_free_inode);
-EXPORT_SYMBOL(dquot_transfer);
-EXPORT_SYMBOL(vfs_dq_transfer);
-EXPORT_SYMBOL(vfs_dq_quota_on_remount);
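
The generic disk quota implementation deleted above is not dropped: it moves under fs/quota/ (fs/quota/dquot.c and friends) elsewhere in this merge. As a reminder of how the generic pieces removed here are consumed, the following is a minimal sketch, not part of the patch, of how a filesystem without journalled quota support typically wires them up at mount time (ext2 does essentially this; the function name is hypothetical):

static void example_setup_generic_quota(struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;	/* generic per-inode charge/uncharge ops */
	sb->s_qcop = &vfs_quotactl_ops;	/* generic backend for quotactl(2) */
#endif
}
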
index 4a29d63..7f8d2e5 100644 (file)
@@ -570,7 +570,7 @@ do_more:
 error_return:
        brelse(bitmap_bh);
        release_blocks(sb, freed);
-       DQUOT_FREE_BLOCK(inode, freed);
+       vfs_dq_free_block(inode, freed);
 }
 
 /**
@@ -1247,7 +1247,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
        /*
         * Check quota for allocation of this block.
         */
-       if (DQUOT_ALLOC_BLOCK(inode, num)) {
+       if (vfs_dq_alloc_block(inode, num)) {
                *errp = -EDQUOT;
                return 0;
        }
@@ -1409,7 +1409,7 @@ allocated:
 
        *errp = 0;
        brelse(bitmap_bh);
-       DQUOT_FREE_BLOCK(inode, *count-num);
+       vfs_dq_free_block(inode, *count-num);
        *count = num;
        return ret_block;
 
@@ -1420,7 +1420,7 @@ out:
         * Undo the block allocation
         */
        if (!performed_allocation)
-               DQUOT_FREE_BLOCK(inode, *count);
+               vfs_dq_free_block(inode, *count);
        brelse(bitmap_bh);
        return 0;
 }
index 66321a8..15387c9 100644 (file)
@@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode)
        if (!is_bad_inode(inode)) {
                /* Quota is already initialized in iput() */
                ext2_xattr_delete_inode(inode);
-               DQUOT_FREE_INODE(inode);
-               DQUOT_DROP(inode);
+               vfs_dq_free_inode(inode);
+               vfs_dq_drop(inode);
        }
 
        es = EXT2_SB(sb)->s_es;
@@ -586,7 +586,7 @@ got:
                goto fail_drop;
        }
 
-       if (DQUOT_ALLOC_INODE(inode)) {
+       if (vfs_dq_alloc_inode(inode)) {
                err = -EDQUOT;
                goto fail_drop;
        }
@@ -605,10 +605,10 @@ got:
        return inode;
 
 fail_free_drop:
-       DQUOT_FREE_INODE(inode);
+       vfs_dq_free_inode(inode);
 
 fail_drop:
-       DQUOT_DROP(inode);
+       vfs_dq_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        unlock_new_inode(inode);
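
Taken together, the ext2 ialloc.c hunks above show the inode-lifetime quota protocol in its lowercase spelling: charge the inode with vfs_dq_alloc_inode() at create time (backing out on failure), and when freeing an inode return it with vfs_dq_free_inode() before dropping the dquot references with vfs_dq_drop(). A compact sketch under those assumptions (hypothetical helper names, error paths trimmed):

static int example_charge_new_inode(struct inode *inode)
{
	if (vfs_dq_alloc_inode(inode))	/* nonzero means over quota */
		return -EDQUOT;
	return 0;
}

static void example_uncharge_freed_inode(struct inode *inode)
{
	vfs_dq_free_inode(inode);	/* give the inode back to the quota */
	vfs_dq_drop(inode);		/* release the inode->i_dquot references */
}
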
index 23fff2f..b43b955 100644 (file)
@@ -1444,7 +1444,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
                return error;
        if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
            (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
-               error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0;
+               error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
                if (error)
                        return error;
        }
index 7c6e360..f983225 100644 (file)
@@ -1331,6 +1331,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
                                sb->s_blocksize - offset : toread;
 
                tmp_bh.b_state = 0;
+               tmp_bh.b_size = sb->s_blocksize;
                err = ext2_get_block(inode, blk, &tmp_bh, 0);
                if (err < 0)
                        return err;
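
The one-line ext2_quota_read() fix above matters because ext2_get_block() uses the buffer_head's b_size to bound how many blocks it may map, so an on-stack buffer_head with a stale b_size can yield a bogus mapping. A sketch of the intended pattern (illustrative helper, name hypothetical):

static int example_read_quota_block(struct super_block *sb, struct inode *inode,
				    sector_t blk, char *data)
{
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	int err;

	tmp_bh.b_state = 0;
	tmp_bh.b_size = sb->s_blocksize;		/* bound the mapping to one block */
	err = ext2_get_block(inode, blk, &tmp_bh, 0);	/* create == 0: never allocate */
	if (err < 0)
		return err;
	if (!buffer_mapped(&tmp_bh))			/* a hole reads back as zeroes */
		return 0;
	bh = sb_bread(sb, tmp_bh.b_blocknr);
	if (!bh)
		return -EIO;
	memcpy(data, bh->b_data, sb->s_blocksize);
	brelse(bh);
	return sb->s_blocksize;
}
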
index 987a526..7913531 100644 (file)
@@ -642,7 +642,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
                                ea_bdebug(new_bh, "reusing block");
 
                                error = -EDQUOT;
-                               if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+                               if (vfs_dq_alloc_block(inode, 1)) {
                                        unlock_buffer(new_bh);
                                        goto cleanup;
                                }
@@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
                 * as if nothing happened and cleanup the unused block */
                if (error && error != -ENOSPC) {
                        if (new_bh && new_bh != old_bh)
-                               DQUOT_FREE_BLOCK(inode, 1);
+                               vfs_dq_free_block(inode, 1);
                        goto cleanup;
                }
        } else
@@ -731,7 +731,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
                        le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
                        if (ce)
                                mb_cache_entry_release(ce);
-                       DQUOT_FREE_BLOCK(inode, 1);
+                       vfs_dq_free_block(inode, 1);
                        mark_buffer_dirty(old_bh);
                        ea_bdebug(old_bh, "refcount now=%d",
                                le32_to_cpu(HDR(old_bh)->h_refcount));
@@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode)
                mark_buffer_dirty(bh);
                if (IS_SYNC(inode))
                        sync_dirty_buffer(bh);
-               DQUOT_FREE_BLOCK(inode, 1);
+               vfs_dq_free_block(inode, 1);
        }
        EXT2_I(inode)->i_file_acl = 0;
 
index 0dbf1c0..225202d 100644 (file)
@@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
        }
        ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
        if (dquot_freed_blocks)
-               DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+               vfs_dq_free_block(inode, dquot_freed_blocks);
        return;
 }
 
@@ -1502,7 +1502,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
        /*
         * Check quota for allocation of this block.
         */
-       if (DQUOT_ALLOC_BLOCK(inode, num)) {
+       if (vfs_dq_alloc_block(inode, num)) {
                *errp = -EDQUOT;
                return 0;
        }
@@ -1714,7 +1714,7 @@ allocated:
 
        *errp = 0;
        brelse(bitmap_bh);
-       DQUOT_FREE_BLOCK(inode, *count-num);
+       vfs_dq_free_block(inode, *count-num);
        *count = num;
        return ret_block;
 
@@ -1729,7 +1729,7 @@ out:
         * Undo the block allocation
         */
        if (!performed_allocation)
-               DQUOT_FREE_BLOCK(inode, *count);
+               vfs_dq_free_block(inode, *count);
        brelse(bitmap_bh);
        return 0;
 }
index 8de6c72..dd13d60 100644 (file)
@@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
         * Note: we must free any quota before locking the superblock,
         * as writing the quota to disk may need the lock as well.
         */
-       DQUOT_INIT(inode);
+       vfs_dq_init(inode);
        ext3_xattr_delete_inode(handle, inode);
-       DQUOT_FREE_INODE(inode);
-       DQUOT_DROP(inode);
+       vfs_dq_free_inode(inode);
+       vfs_dq_drop(inode);
 
        is_directory = S_ISDIR(inode->i_mode);
 
@@ -589,7 +589,7 @@ got:
                sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
 
        ret = inode;
-       if(DQUOT_ALLOC_INODE(inode)) {
+       if (vfs_dq_alloc_inode(inode)) {
                err = -EDQUOT;
                goto fail_drop;
        }
@@ -620,10 +620,10 @@ really_out:
        return ret;
 
 fail_free_drop:
-       DQUOT_FREE_INODE(inode);
+       vfs_dq_free_inode(inode);
 
 fail_drop:
-       DQUOT_DROP(inode);
+       vfs_dq_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        unlock_new_inode(inode);
index 05e5c2e..4a09ff1 100644 (file)
@@ -3063,7 +3063,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
                        error = PTR_ERR(handle);
                        goto err_out;
                }
-               error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+               error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
                if (error) {
                        ext3_journal_stop(handle);
                        return error;
@@ -3154,7 +3154,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
                ret = 2 * (bpp + indirects) + 2;
 
 #ifdef CONFIG_QUOTA
-       /* We know that structure was already allocated during DQUOT_INIT so
+       /* We know that structure was already allocated during vfs_dq_init so
         * we will be updating only the data blocks + inodes */
        ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
 #endif
@@ -3245,7 +3245,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
  * i_size has been changed by generic_commit_write() and we thus need
  * to include the updated inode in the current transaction.
  *
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
  * are allocated to the file.
  *
  * If the inode is marked synchronous, we don't honour that here - doing
index 4db4ffa..e2fc63c 100644 (file)
@@ -2049,7 +2049,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
 
        /* Initialize quotas before so that eventual writes go in
         * separate transaction */
-       DQUOT_INIT(dentry->d_inode);
+       vfs_dq_init(dentry->d_inode);
        handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -2108,7 +2108,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
 
        /* Initialize quotas before so that eventual writes go
         * in separate transaction */
-       DQUOT_INIT(dentry->d_inode);
+       vfs_dq_init(dentry->d_inode);
        handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -2272,7 +2272,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
        /* Initialize quotas before so that eventual writes go
         * in separate transaction */
        if (new_dentry->d_inode)
-               DQUOT_INIT(new_dentry->d_inode);
+               vfs_dq_init(new_dentry->d_inode);
        handle = ext3_journal_start(old_dir, 2 *
                                        EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
                                        EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
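
The ext3 namei.c hunks above preserve the existing rule under the new names: quota structures of the inode being removed are set up with vfs_dq_init() before the journal handle is started, so any quota file IO triggered by that initialization goes into its own transaction rather than nesting inside ours. A minimal sketch of the ordering (hypothetical function, real unlink work omitted):

static int example_unlink_ordering(struct inode *dir, struct dentry *dentry)
{
	handle_t *handle;

	/* May read or allocate quota structures, i.e. do quota file IO -
	 * keep that outside (before) our own transaction. */
	vfs_dq_init(dentry->d_inode);

	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... remove the directory entry and drop the inode link count here ... */

	ext3_journal_stop(handle);
	return 0;
}
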
index 4a97041..9e5b8e3 100644 (file)
@@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
 #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
 #define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
 
-static int ext3_dquot_initialize(struct inode *inode, int type);
-static int ext3_dquot_drop(struct inode *inode);
 static int ext3_write_dquot(struct dquot *dquot);
 static int ext3_acquire_dquot(struct dquot *dquot);
 static int ext3_release_dquot(struct dquot *dquot);
@@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
                                const char *data, size_t len, loff_t off);
 
 static struct dquot_operations ext3_quota_operations = {
-       .initialize     = ext3_dquot_initialize,
-       .drop           = ext3_dquot_drop,
+       .initialize     = dquot_initialize,
+       .drop           = dquot_drop,
        .alloc_space    = dquot_alloc_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
@@ -1438,7 +1436,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
                }
 
                list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-               DQUOT_INIT(inode);
+               vfs_dq_init(inode);
                if (inode->i_nlink) {
                        printk(KERN_DEBUG
                                "%s: truncating inode %lu to %Ld bytes\n",
@@ -2702,7 +2700,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
  * Process 1                         Process 2
  * ext3_create()                     quota_sync()
  *   journal_start()                   write_dquot()
- *   DQUOT_INIT()                        down(dqio_mutex)
+ *   vfs_dq_init()                       down(dqio_mutex)
  *     down(dqio_mutex)                    journal_start()
  *
  */
@@ -2714,44 +2712,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
        return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
 }
 
-static int ext3_dquot_initialize(struct inode *inode, int type)
-{
-       handle_t *handle;
-       int ret, err;
-
-       /* We may create quota structure so we need to reserve enough blocks */
-       handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-       ret = dquot_initialize(inode, type);
-       err = ext3_journal_stop(handle);
-       if (!ret)
-               ret = err;
-       return ret;
-}
-
-static int ext3_dquot_drop(struct inode *inode)
-{
-       handle_t *handle;
-       int ret, err;
-
-       /* We may delete quota structure so we need to reserve enough blocks */
-       handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
-       if (IS_ERR(handle)) {
-               /*
-                * We call dquot_drop() anyway to at least release references
-                * to quota structures so that umount does not hang.
-                */
-               dquot_drop(inode);
-               return PTR_ERR(handle);
-       }
-       ret = dquot_drop(inode);
-       err = ext3_journal_stop(handle);
-       if (!ret)
-               ret = err;
-       return ret;
-}
-
 static int ext3_write_dquot(struct dquot *dquot)
 {
        int ret, err;
index 175414a..83b7be8 100644 (file)
@@ -498,7 +498,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
                error = ext3_journal_dirty_metadata(handle, bh);
                if (IS_SYNC(inode))
                        handle->h_sync = 1;
-               DQUOT_FREE_BLOCK(inode, 1);
+               vfs_dq_free_block(inode, 1);
                ea_bdebug(bh, "refcount now=%d; releasing",
                          le32_to_cpu(BHDR(bh)->h_refcount));
                if (ce)
@@ -774,7 +774,7 @@ inserted:
                                /* The old block is released after updating
                                   the inode. */
                                error = -EDQUOT;
-                               if (DQUOT_ALLOC_BLOCK(inode, 1))
+                               if (vfs_dq_alloc_block(inode, 1))
                                        goto cleanup;
                                error = ext3_journal_get_write_access(handle,
                                                                      new_bh);
@@ -848,7 +848,7 @@ cleanup:
        return error;
 
 cleanup_dquot:
-       DQUOT_FREE_BLOCK(inode, 1);
+       vfs_dq_free_block(inode, 1);
        goto cleanup;
 
 bad_block:
index de9459b..38f40d5 100644 (file)
@@ -536,7 +536,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
        ext4_mb_free_blocks(handle, inode, block, count,
                            metadata, &dquot_freed_blocks);
        if (dquot_freed_blocks)
-               DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+               vfs_dq_free_block(inode, dquot_freed_blocks);
        return;
 }
 
index b0c87dc..6083bb3 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/blkdev.h>
 #include <linux/magic.h>
 #include <linux/jbd2.h>
+#include <linux/quota.h>
 #include "ext4_i.h"
 
 /*
@@ -1098,6 +1099,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
 extern int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+extern qsize_t ext4_get_reserved_space(struct inode *inode);
 
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
index 2d2b358..fb51b40 100644 (file)
@@ -220,10 +220,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
         * Note: we must free any quota before locking the superblock,
         * as writing the quota to disk may need the lock as well.
         */
-       DQUOT_INIT(inode);
+       vfs_dq_init(inode);
        ext4_xattr_delete_inode(handle, inode);
-       DQUOT_FREE_INODE(inode);
-       DQUOT_DROP(inode);
+       vfs_dq_free_inode(inode);
+       vfs_dq_drop(inode);
 
        is_directory = S_ISDIR(inode->i_mode);
 
@@ -915,7 +915,7 @@ got:
        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
 
        ret = inode;
-       if (DQUOT_ALLOC_INODE(inode)) {
+       if (vfs_dq_alloc_inode(inode)) {
                err = -EDQUOT;
                goto fail_drop;
        }
@@ -956,10 +956,10 @@ really_out:
        return ret;
 
 fail_free_drop:
-       DQUOT_FREE_INODE(inode);
+       vfs_dq_free_inode(inode);
 
 fail_drop:
-       DQUOT_DROP(inode);
+       vfs_dq_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        unlock_new_inode(inode);
index c7fed5b..71d3ecd 100644 (file)
@@ -975,6 +975,17 @@ out:
        return err;
 }
 
+qsize_t ext4_get_reserved_space(struct inode *inode)
+{
+       unsigned long long total;
+
+       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+       total = EXT4_I(inode)->i_reserved_data_blocks +
+               EXT4_I(inode)->i_reserved_meta_blocks;
+       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+       return total;
+}
 /*
 * Calculate the number of metadata blocks needed to reserve
 * in order to allocate @blocks for a non-extent-based file
@@ -1036,8 +1047,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
        /* update per-inode reservations */
        BUG_ON(used  > EXT4_I(inode)->i_reserved_data_blocks);
        EXT4_I(inode)->i_reserved_data_blocks -= used;
-
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+       /*
+        * release quota reserved for metadata blocks that are no longer needed
+        */
+
+       if (mdb_free)
+               vfs_dq_release_reservation_block(inode, mdb_free);
 }
 
 /*
@@ -1553,8 +1570,8 @@ static int ext4_journalled_write_end(struct file *file,
 static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
 {
        int retries = 0;
-       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       unsigned long md_needed, mdblocks, total = 0;
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       unsigned long md_needed, mdblocks, total = 0;
 
        /*
         * recalculate the amount of metadata blocks to reserve
@@ -1570,12 +1587,23 @@ repeat:
        md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
        total = md_needed + nrblocks;
 
+       /*
+        * Make quota reservation here to prevent quota overflow
+        * later. Real quota accounting is done at page writeout
+        * time.
+        */
+       if (vfs_dq_reserve_block(inode, total)) {
+               spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+               return -EDQUOT;
+       }
+
        if (ext4_claim_free_blocks(sbi, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
+               vfs_dq_release_reservation_block(inode, total);
                return -ENOSPC;
        }
        EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1629,6 +1657,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
        BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
        EXT4_I(inode)->i_reserved_meta_blocks = mdb;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+       vfs_dq_release_reservation_block(inode, release);
 }
 
 static void ext4_da_page_release_reservation(struct page *page,
@@ -4612,7 +4642,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                        error = PTR_ERR(handle);
                        goto err_out;
                }
-               error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+               error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
                if (error) {
                        ext4_journal_stop(handle);
                        return error;
@@ -4991,7 +5021,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
  * i_size has been changed by generic_commit_write() and we thus need
  * to include the updated inode in the current transaction.
  *
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
  * are allocated to the file.
  *
  * If the inode is marked synchronous, we don't honour that here - doing
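Taken together, the inode.c hunks make delayed allocation reserve quota up front: vfs_dq_reserve_block() is called when data lands in the page cache, and the reservation is either given back (on ENOSPC, on page release, or when metadata turned out to be over-booked) or converted into a real charge at block-allocation time (see the mballoc.c hunk below). A condensed sketch of the reservation side, with the locking and retry details omitted; example_claim_free_blocks() is a placeholder for the filesystem's own free-block accounting (ext4 uses ext4_claim_free_blocks()):

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Placeholder for the filesystem's free-block accounting. */
static int example_claim_free_blocks(struct super_block *sb, int nblocks)
{
        return 0;       /* pretend the blocks are always available */
}

/*
 * Sketch of the ordering above: reserve quota before claiming free
 * blocks, and hand the quota reservation back if the block reservation
 * cannot be satisfied.
 */
static int example_da_reserve(struct inode *inode, int data_blocks,
                              int meta_blocks)
{
        int total = data_blocks + meta_blocks;

        if (vfs_dq_reserve_block(inode, total))
                return -EDQUOT;         /* over quota: nothing to undo */

        if (example_claim_free_blocks(inode->i_sb, total)) {
                /* out of space: give the quota reservation back */
                vfs_dq_release_reservation_block(inode, total);
                return -ENOSPC;
        }
        return 0;
}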
index 9f61e62..b038188 100644 (file)
@@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
        if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
                /* release all the reserved blocks if non delalloc */
                percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
-       else
+       else {
                percpu_counter_sub(&sbi->s_dirtyblocks_counter,
                                                ac->ac_b_ex.fe_len);
+               /* convert reserved quota blocks to real quota blocks */
+               vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
+       }
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -4544,7 +4547,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
        struct ext4_sb_info *sbi;
        struct super_block *sb;
        ext4_fsblk_t block = 0;
-       unsigned int inquota;
+       unsigned int inquota = 0;
        unsigned int reserv_blks = 0;
 
        sb = ar->inode->i_sb;
@@ -4562,9 +4565,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
                   (unsigned long long) ar->pleft,
                   (unsigned long long) ar->pright);
 
-       if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
-               /*
-                * With delalloc we already reserved the blocks
+       /*
+        * For delayed allocation, we can skip the ENOSPC and
+        * EDQUOT checks, as blocks and quota have already been
+        * reserved when the data was copied into the pagecache.
+        */
+       if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+               ar->flags |= EXT4_MB_DELALLOC_RESERVED;
+       else {
+               /* Without delayed allocation we need to verify
+                * there are enough free blocks to do the block allocation
+                * and that the allocation doesn't exceed the quota limits.
                 */
                while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
                        /* let others to free the space */
@@ -4576,19 +4587,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
                        return 0;
                }
                reserv_blks = ar->len;
+               while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
+                       ar->flags |= EXT4_MB_HINT_NOPREALLOC;
+                       ar->len--;
+               }
+               inquota = ar->len;
+               if (ar->len == 0) {
+                       *errp = -EDQUOT;
+                       goto out3;
+               }
        }
-       while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
-               ar->flags |= EXT4_MB_HINT_NOPREALLOC;
-               ar->len--;
-       }
-       if (ar->len == 0) {
-               *errp = -EDQUOT;
-               goto out3;
-       }
-       inquota = ar->len;
-
-       if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
-               ar->flags |= EXT4_MB_DELALLOC_RESERVED;
 
        ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
        if (!ac) {
@@ -4654,8 +4662,8 @@ repeat:
 out2:
        kmem_cache_free(ext4_ac_cachep, ac);
 out1:
-       if (ar->len < inquota)
-               DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
+       if (inquota && ar->len < inquota)
+               vfs_dq_free_block(ar->inode, inquota - ar->len);
 out3:
        if (!ar->len) {
                if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
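The mballoc.c change supplies the other half of the scheme: for delalloc writes the quota was already reserved, so the allocator only converts that reservation into usage with vfs_dq_claim_block(); for ordinary writes it still charges quota up front with vfs_dq_alloc_block(), shrinking the request until it fits. A schematic of the two paths, condensed from the hunk above; the return conventions are illustrative, not ext4's:

#include <linux/fs.h>
#include <linux/quotaops.h>

/*
 * Illustrative sketch: 'delalloc' means blocks and quota were already
 * reserved when the data was copied into the page cache.
 */
static int example_account_allocation(struct inode *inode, int len,
                                      int delalloc)
{
        if (delalloc) {
                /* convert part of the earlier reservation into usage */
                vfs_dq_claim_block(inode, len);
                return len;
        }

        /* no reservation yet: charge quota now, trimming len to fit */
        while (len && vfs_dq_alloc_block(inode, len))
                len--;
        if (!len)
                return -EDQUOT;
        return len;
}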
index ba702bd..8341024 100644 (file)
@@ -2092,7 +2092,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
 
        /* Initialize quotas before so that eventual writes go in
         * separate transaction */
-       DQUOT_INIT(dentry->d_inode);
+       vfs_dq_init(dentry->d_inode);
        handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -2151,7 +2151,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
 
        /* Initialize quotas before so that eventual writes go
         * in separate transaction */
-       DQUOT_INIT(dentry->d_inode);
+       vfs_dq_init(dentry->d_inode);
        handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -2318,7 +2318,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        /* Initialize quotas before so that eventual writes go
         * in separate transaction */
        if (new_dentry->d_inode)
-               DQUOT_INIT(new_dentry->d_inode);
+               vfs_dq_init(new_dentry->d_inode);
        handle = ext4_journal_start(old_dir, 2 *
                                        EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
                                        EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
index 39d1993..f7371a6 100644 (file)
@@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_
 #define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
 #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
 
-static int ext4_dquot_initialize(struct inode *inode, int type);
-static int ext4_dquot_drop(struct inode *inode);
 static int ext4_write_dquot(struct dquot *dquot);
 static int ext4_acquire_dquot(struct dquot *dquot);
 static int ext4_release_dquot(struct dquot *dquot);
@@ -942,9 +940,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
                                const char *data, size_t len, loff_t off);
 
 static struct dquot_operations ext4_quota_operations = {
-       .initialize     = ext4_dquot_initialize,
-       .drop           = ext4_dquot_drop,
+       .initialize     = dquot_initialize,
+       .drop           = dquot_drop,
        .alloc_space    = dquot_alloc_space,
+       .reserve_space  = dquot_reserve_space,
+       .claim_space    = dquot_claim_space,
+       .release_rsv    = dquot_release_reserved_space,
+       .get_reserved_space = ext4_get_reserved_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        .free_inode     = dquot_free_inode,
@@ -1802,7 +1804,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                }
 
                list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
-               DQUOT_INIT(inode);
+               vfs_dq_init(inode);
                if (inode->i_nlink) {
                        printk(KERN_DEBUG
                                "%s: truncating inode %lu to %lld bytes\n",
@@ -3367,8 +3369,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
  * is locked for write. Otherwise there are possible deadlocks:
  * Process 1                         Process 2
  * ext4_create()                     quota_sync()
- *   jbd2_journal_start()                   write_dquot()
- *   DQUOT_INIT()                        down(dqio_mutex)
+ *   jbd2_journal_start()                  write_dquot()
+ *   vfs_dq_init()                         down(dqio_mutex)
  *     down(dqio_mutex)                    jbd2_journal_start()
  *
  */
@@ -3380,44 +3382,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
        return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
 }
 
-static int ext4_dquot_initialize(struct inode *inode, int type)
-{
-       handle_t *handle;
-       int ret, err;
-
-       /* We may create quota structure so we need to reserve enough blocks */
-       handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-       ret = dquot_initialize(inode, type);
-       err = ext4_journal_stop(handle);
-       if (!ret)
-               ret = err;
-       return ret;
-}
-
-static int ext4_dquot_drop(struct inode *inode)
-{
-       handle_t *handle;
-       int ret, err;
-
-       /* We may delete quota structure so we need to reserve enough blocks */
-       handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
-       if (IS_ERR(handle)) {
-               /*
-                * We call dquot_drop() anyway to at least release references
-                * to quota structures so that umount does not hang.
-                */
-               dquot_drop(inode);
-               return PTR_ERR(handle);
-       }
-       ret = dquot_drop(inode);
-       err = ext4_journal_stop(handle);
-       if (!ret)
-               ret = err;
-       return ret;
-}
-
 static int ext4_write_dquot(struct dquot *dquot)
 {
        int ret, err;
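The super.c hunk drops ext4's private initialize/drop wrappers in favour of the generic dquot helpers and adds the reservation callbacks, including ->get_reserved_space backed by the new ext4_get_reserved_space(). A sketch of how a filesystem would wire up such a table; the examplefs_* names are placeholders, only the reservation-related entries are shown, and a real filesystem must also fill in the transfer/write/acquire/release callbacks as ext4 does:

#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>

static qsize_t examplefs_get_reserved_space(struct inode *inode)
{
        return 0;       /* a real fs returns its in-memory reservation */
}

static struct dquot_operations examplefs_quota_operations = {
        .initialize     = dquot_initialize,
        .drop           = dquot_drop,
        .alloc_space    = dquot_alloc_space,
        .reserve_space  = dquot_reserve_space,
        .claim_space    = dquot_claim_space,
        .release_rsv    = dquot_release_reserved_space,
        .get_reserved_space = examplefs_get_reserved_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        .free_inode     = dquot_free_inode,
        /* remaining callbacks omitted for brevity */
};

/* Installed at mount time so that the vfs_dq_*() wrappers find it. */
static void examplefs_setup_quota(struct super_block *sb)
{
        sb->dq_op = &examplefs_quota_operations;
}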
index 157ce65..62b31c2 100644 (file)
@@ -490,7 +490,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                error = ext4_handle_dirty_metadata(handle, inode, bh);
                if (IS_SYNC(inode))
                        ext4_handle_sync(handle);
-               DQUOT_FREE_BLOCK(inode, 1);
+               vfs_dq_free_block(inode, 1);
                ea_bdebug(bh, "refcount now=%d; releasing",
                          le32_to_cpu(BHDR(bh)->h_refcount));
                if (ce)
@@ -784,7 +784,7 @@ inserted:
                                /* The old block is released after updating
                                   the inode. */
                                error = -EDQUOT;
-                               if (DQUOT_ALLOC_BLOCK(inode, 1))
+                               if (vfs_dq_alloc_block(inode, 1))
                                        goto cleanup;
                                error = ext4_journal_get_write_access(handle,
                                                                      new_bh);
@@ -860,7 +860,7 @@ cleanup:
        return error;
 
 cleanup_dquot:
-       DQUOT_FREE_BLOCK(inode, 1);
+       vfs_dq_free_block(inode, 1);
        goto cleanup;
 
 bad_block:
index 643ac43..29df4a2 100644 (file)
@@ -294,7 +294,7 @@ void clear_inode(struct inode *inode)
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
        inode_sync_wait(inode);
-       DQUOT_DROP(inode);
+       vfs_dq_drop(inode);
        if (inode->i_sb->s_op->clear_inode)
                inode->i_sb->s_op->clear_inode(inode);
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
@@ -1168,7 +1168,7 @@ void generic_delete_inode(struct inode *inode)
        if (op->delete_inode) {
                void (*delete)(struct inode *) = op->delete_inode;
                if (!is_bad_inode(inode))
-                       DQUOT_INIT(inode);
+                       vfs_dq_init(inode);
                /* Filesystems implementing their own
                 * s_op->delete_inode are required to call
                 * truncate_inode_pages and clear_inode()
index d3e5c33..a166c16 100644 (file)
@@ -233,7 +233,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
 
        if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
            (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
-               if (DQUOT_TRANSFER(inode, iattr))
+               if (vfs_dq_transfer(inode, iattr))
                        return -EDQUOT;
        }
 
index b00ee9f..b2ae190 100644 (file)
@@ -158,9 +158,9 @@ void jfs_delete_inode(struct inode *inode)
                /*
                 * Free the inode from the quota allocation.
                 */
-               DQUOT_INIT(inode);
-               DQUOT_FREE_INODE(inode);
-               DQUOT_DROP(inode);
+               vfs_dq_init(inode);
+               vfs_dq_free_inode(inode);
+               vfs_dq_drop(inode);
        }
 
        clear_inode(inode);
index 4dcc058..925871e 100644 (file)
@@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
                 * It's time to move the inline table to an external
                 * page and begin to build the xtree
                 */
-               if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
+               if (vfs_dq_alloc_block(ip, sbi->nbperpage))
                        goto clean_up;
                if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
-                       DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+                       vfs_dq_free_block(ip, sbi->nbperpage);
                        goto clean_up;
                }
 
@@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
                        memcpy(&jfs_ip->i_dirtable, temp_table,
                               sizeof (temp_table));
                        dbFree(ip, xaddr, sbi->nbperpage);
-                       DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+                       vfs_dq_free_block(ip, sbi->nbperpage);
                        goto clean_up;
                }
                ip->i_size = PSIZE;
@@ -1027,7 +1027,7 @@ static int dtSplitUp(tid_t tid,
                        n = xlen;
 
                /* Allocate blocks to quota. */
-               if (DQUOT_ALLOC_BLOCK(ip, n)) {
+               if (vfs_dq_alloc_block(ip, n)) {
                        rc = -EDQUOT;
                        goto extendOut;
                }
@@ -1308,7 +1308,7 @@ static int dtSplitUp(tid_t tid,
 
        /* Rollback quota allocation */
        if (rc && quota_allocation)
-               DQUOT_FREE_BLOCK(ip, quota_allocation);
+               vfs_dq_free_block(ip, quota_allocation);
 
       dtSplitUp_Exit:
 
@@ -1369,7 +1369,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
                return -EIO;
 
        /* Allocate blocks to quota. */
-       if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+       if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
                release_metapage(rmp);
                return -EDQUOT;
        }
@@ -1916,7 +1916,7 @@ static int dtSplitRoot(tid_t tid,
        rp = rmp->data;
 
        /* Allocate blocks to quota. */
-       if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+       if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
                release_metapage(rmp);
                return -EDQUOT;
        }
@@ -2287,7 +2287,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
        xlen = lengthPXD(&fp->header.self);
 
        /* Free quota allocation. */
-       DQUOT_FREE_BLOCK(ip, xlen);
+       vfs_dq_free_block(ip, xlen);
 
        /* free/invalidate its buffer page */
        discard_metapage(fmp);
@@ -2363,7 +2363,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
                                xlen = lengthPXD(&p->header.self);
 
                                /* Free quota allocation */
-                               DQUOT_FREE_BLOCK(ip, xlen);
+                               vfs_dq_free_block(ip, xlen);
 
                                /* free/invalidate its buffer page */
                                discard_metapage(mp);
index 7ae1e32..169802e 100644 (file)
@@ -141,7 +141,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
        }
 
        /* Allocate blocks to quota. */
-       if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+       if (vfs_dq_alloc_block(ip, nxlen)) {
                dbFree(ip, nxaddr, (s64) nxlen);
                mutex_unlock(&JFS_IP(ip)->commit_mutex);
                return -EDQUOT;
@@ -164,7 +164,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
         */
        if (rc) {
                dbFree(ip, nxaddr, nxlen);
-               DQUOT_FREE_BLOCK(ip, nxlen);
+               vfs_dq_free_block(ip, nxlen);
                mutex_unlock(&JFS_IP(ip)->commit_mutex);
                return (rc);
        }
@@ -256,7 +256,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
                goto exit;
 
        /* Allocate blocks to quota. */
-       if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+       if (vfs_dq_alloc_block(ip, nxlen)) {
                dbFree(ip, nxaddr, (s64) nxlen);
                mutex_unlock(&JFS_IP(ip)->commit_mutex);
                return -EDQUOT;
@@ -297,7 +297,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
                /* extend the extent */
                if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
                        dbFree(ip, xaddr + xlen, delta);
-                       DQUOT_FREE_BLOCK(ip, nxlen);
+                       vfs_dq_free_block(ip, nxlen);
                        goto exit;
                }
        } else {
@@ -308,7 +308,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
                 */
                if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
                        dbFree(ip, nxaddr, nxlen);
-                       DQUOT_FREE_BLOCK(ip, nxlen);
+                       vfs_dq_free_block(ip, nxlen);
                        goto exit;
                }
        }
index d4d142c..dc0e021 100644 (file)
@@ -116,7 +116,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
        /*
         * Allocate inode to quota.
         */
-       if (DQUOT_ALLOC_INODE(inode)) {
+       if (vfs_dq_alloc_inode(inode)) {
                rc = -EDQUOT;
                goto fail_drop;
        }
@@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
        return inode;
 
 fail_drop:
-       DQUOT_DROP(inode);
+       vfs_dq_drop(inode);
        inode->i_flags |= S_NOQUOTA;
 fail_unlock:
        inode->i_nlink = 0;
index ae3acaf..a27e26c 100644 (file)
@@ -846,10 +846,10 @@ int xtInsert(tid_t tid,           /* transaction id */
                        hint = addressXAD(xad) + lengthXAD(xad) - 1;
                } else
                        hint = 0;
-               if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
+               if ((rc = vfs_dq_alloc_block(ip, xlen)))
                        goto out;
                if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
-                       DQUOT_FREE_BLOCK(ip, xlen);
+                       vfs_dq_free_block(ip, xlen);
                        goto out;
                }
        }
@@ -878,7 +878,7 @@ int xtInsert(tid_t tid,             /* transaction id */
                        /* undo data extent allocation */
                        if (*xaddrp == 0) {
                                dbFree(ip, xaddr, (s64) xlen);
-                               DQUOT_FREE_BLOCK(ip, xlen);
+                               vfs_dq_free_block(ip, xlen);
                        }
                        return rc;
                }
@@ -1246,7 +1246,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
        rbn = addressPXD(pxd);
 
        /* Allocate blocks to quota. */
-       if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+       if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
                rc = -EDQUOT;
                goto clean_up;
        }
@@ -1456,7 +1456,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
 
        /* Rollback quota allocation. */
        if (quota_allocation)
-               DQUOT_FREE_BLOCK(ip, quota_allocation);
+               vfs_dq_free_block(ip, quota_allocation);
 
        return (rc);
 }
@@ -1513,7 +1513,7 @@ xtSplitRoot(tid_t tid,
                return -EIO;
 
        /* Allocate blocks to quota. */
-       if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+       if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
                release_metapage(rmp);
                return -EDQUOT;
        }
@@ -3941,7 +3941,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
                ip->i_size = newsize;
 
        /* update quota allocation to reflect freed blocks */
-       DQUOT_FREE_BLOCK(ip, nfreed);
+       vfs_dq_free_block(ip, nfreed);
 
        /*
         * free tlock of invalidated pages
index b4de56b..9feaa04 100644 (file)
@@ -356,7 +356,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
        jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
 
        /* Init inode for quota operations. */
-       DQUOT_INIT(ip);
+       vfs_dq_init(ip);
 
        /* directory must be empty to be removed */
        if (!dtEmpty(ip)) {
@@ -483,7 +483,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
        jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
 
        /* Init inode for quota operations. */
-       DQUOT_INIT(ip);
+       vfs_dq_init(ip);
 
        if ((rc = get_UCSname(&dname, dentry)))
                goto out;
@@ -1136,7 +1136,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        } else if (new_ip) {
                IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
                /* Init inode for quota operations. */
-               DQUOT_INIT(new_ip);
+               vfs_dq_init(new_ip);
        }
 
        /*
index 9b7f2cd..61dfa81 100644 (file)
@@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
        nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
 
        /* Allocate new blocks to quota. */
-       if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
+       if (vfs_dq_alloc_block(ip, nblocks)) {
                return -EDQUOT;
        }
 
        rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
        if (rc) {
                /* Rollback quota allocation. */
-               DQUOT_FREE_BLOCK(ip, nblocks);
+               vfs_dq_free_block(ip, nblocks);
                return rc;
        }
 
@@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
 
       failed:
        /* Rollback quota allocation. */
-       DQUOT_FREE_BLOCK(ip, nblocks);
+       vfs_dq_free_block(ip, nblocks);
 
        dbFree(ip, blkno, nblocks);
        return rc;
@@ -538,7 +538,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
 
        if (blocks_needed > current_blocks) {
                /* Allocate new blocks to quota. */
-               if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
+               if (vfs_dq_alloc_block(inode, blocks_needed))
                        return -EDQUOT;
 
                quota_allocation = blocks_needed;
@@ -602,7 +602,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
       clean_up:
        /* Rollback quota allocation */
        if (quota_allocation)
-               DQUOT_FREE_BLOCK(inode, quota_allocation);
+               vfs_dq_free_block(inode, quota_allocation);
 
        return (rc);
 }
@@ -677,7 +677,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
 
        /* If old blocks exist, they must be removed from quota allocation. */
        if (old_blocks)
-               DQUOT_FREE_BLOCK(inode, old_blocks);
+               vfs_dq_free_block(inode, old_blocks);
 
        inode->i_ctime = CURRENT_TIME;
 
index 1993176..1928197 100644 (file)
@@ -1473,7 +1473,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
        error = security_inode_create(dir, dentry, mode);
        if (error)
                return error;
-       DQUOT_INIT(dir);
+       vfs_dq_init(dir);
        error = dir->i_op->create(dir, dentry, mode, nd);
        if (!error)
                fsnotify_create(dir, dentry);
@@ -1552,7 +1552,7 @@ int may_open(struct path *path, int acc_mode, int flag)
                        error = security_path_truncate(path, 0,
                                               ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
                if (!error) {
-                       DQUOT_INIT(inode);
+                       vfs_dq_init(inode);
 
                        error = do_truncate(dentry, 0,
                                            ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@@ -1563,7 +1563,7 @@ int may_open(struct path *path, int acc_mode, int flag)
                        return error;
        } else
                if (flag & FMODE_WRITE)
-                       DQUOT_INIT(inode);
+                       vfs_dq_init(inode);
 
        return 0;
 }
@@ -1946,7 +1946,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
        if (error)
                return error;
 
-       DQUOT_INIT(dir);
+       vfs_dq_init(dir);
        error = dir->i_op->mknod(dir, dentry, mode, dev);
        if (!error)
                fsnotify_create(dir, dentry);
@@ -2045,7 +2045,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        if (error)
                return error;
 
-       DQUOT_INIT(dir);
+       vfs_dq_init(dir);
        error = dir->i_op->mkdir(dir, dentry, mode);
        if (!error)
                fsnotify_mkdir(dir, dentry);
@@ -2131,7 +2131,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (!dir->i_op->rmdir)
                return -EPERM;
 
-       DQUOT_INIT(dir);
+       vfs_dq_init(dir);
 
        mutex_lock(&dentry->d_inode->i_mutex);
        dentry_unhash(dentry);
@@ -2218,7 +2218,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
        if (!dir->i_op->unlink)
                return -EPERM;
 
-       DQUOT_INIT(dir);
+       vfs_dq_init(dir);
 
        mutex_lock(&dentry->d_inode->i_mutex);
        if (d_mountpoint(dentry))
@@ -2329,7 +2329,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
        if (error)
                return error;
 
-       DQUOT_INIT(dir);
+       vfs_dq_init(dir);
        error = dir->i_op->symlink(dir, dentry, oldname);
        if (!error)
                fsnotify_create(dir, dentry);
@@ -2413,7 +2413,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
                return error;
 
        mutex_lock(&inode->i_mutex);
-       DQUOT_INIT(dir);
+       vfs_dq_init(dir);
        error = dir->i_op->link(old_dentry, dir, new_dentry);
        mutex_unlock(&inode->i_mutex);
        if (!error)
@@ -2612,8 +2612,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (!old_dir->i_op->rename)
                return -EPERM;
 
-       DQUOT_INIT(old_dir);
-       DQUOT_INIT(new_dir);
+       vfs_dq_init(old_dir);
+       vfs_dq_init(new_dir);
 
        old_name = fsnotify_oldname_init(old_dentry->d_name.name);
 
index c165a64..78376b6 100644 (file)
@@ -356,7 +356,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
                        put_write_access(inode);
                        goto out_nfserr;
                }
-               DQUOT_INIT(inode);
+               vfs_dq_init(inode);
        }
 
        /* sanitize the mode change */
@@ -723,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
                else
                        flags = O_WRONLY|O_LARGEFILE;
 
-               DQUOT_INIT(inode);
+               vfs_dq_init(inode);
        }
        *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
                            flags, cred);
index a3a78ce..75b6167 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -273,7 +273,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
        if (!error)
                error = security_path_truncate(&path, length, 0);
        if (!error) {
-               DQUOT_INIT(inode);
+               vfs_dq_init(inode);
                error = do_truncate(path.dentry, length, 0, NULL);
        }
 
diff --git a/fs/quota.c b/fs/quota.c
deleted file mode 100644 (file)
index d76ada9..0000000
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Quota code necessary even when VFS quota support is not compiled
- * into the kernel.  The interesting stuff is over in dquot.c, here
- * we have symbols for initial quotactl(2) handling, the sysctl(2)
- * variables, etc - things needed even when quota support disabled.
- */
-
-#include <linux/fs.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-#include <asm/current.h>
-#include <asm/uaccess.h>
-#include <linux/compat.h>
-#include <linux/kernel.h>
-#include <linux/security.h>
-#include <linux/syscalls.h>
-#include <linux/buffer_head.h>
-#include <linux/capability.h>
-#include <linux/quotaops.h>
-#include <linux/types.h>
-
-/* Check validity of generic quotactl commands */
-static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
-{
-       if (type >= MAXQUOTAS)
-               return -EINVAL;
-       if (!sb && cmd != Q_SYNC)
-               return -ENODEV;
-       /* Is operation supported? */
-       if (sb && !sb->s_qcop)
-               return -ENOSYS;
-
-       switch (cmd) {
-               case Q_GETFMT:
-                       break;
-               case Q_QUOTAON:
-                       if (!sb->s_qcop->quota_on)
-                               return -ENOSYS;
-                       break;
-               case Q_QUOTAOFF:
-                       if (!sb->s_qcop->quota_off)
-                               return -ENOSYS;
-                       break;
-               case Q_SETINFO:
-                       if (!sb->s_qcop->set_info)
-                               return -ENOSYS;
-                       break;
-               case Q_GETINFO:
-                       if (!sb->s_qcop->get_info)
-                               return -ENOSYS;
-                       break;
-               case Q_SETQUOTA:
-                       if (!sb->s_qcop->set_dqblk)
-                               return -ENOSYS;
-                       break;
-               case Q_GETQUOTA:
-                       if (!sb->s_qcop->get_dqblk)
-                               return -ENOSYS;
-                       break;
-               case Q_SYNC:
-                       if (sb && !sb->s_qcop->quota_sync)
-                               return -ENOSYS;
-                       break;
-               default:
-                       return -EINVAL;
-       }
-
-       /* Is quota turned on for commands which need it? */
-       switch (cmd) {
-               case Q_GETFMT:
-               case Q_GETINFO:
-               case Q_SETINFO:
-               case Q_SETQUOTA:
-               case Q_GETQUOTA:
-                       /* This is just informative test so we are satisfied without a lock */
-                       if (!sb_has_quota_active(sb, type))
-                               return -ESRCH;
-       }
-
-       /* Check privileges */
-       if (cmd == Q_GETQUOTA) {
-               if (((type == USRQUOTA && current_euid() != id) ||
-                    (type == GRPQUOTA && !in_egroup_p(id))) &&
-                   !capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-       }
-       else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-
-       return 0;
-}
-
-/* Check validity of XFS Quota Manager commands */
-static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
-{
-       if (type >= XQM_MAXQUOTAS)
-               return -EINVAL;
-       if (!sb)
-               return -ENODEV;
-       if (!sb->s_qcop)
-               return -ENOSYS;
-
-       switch (cmd) {
-               case Q_XQUOTAON:
-               case Q_XQUOTAOFF:
-               case Q_XQUOTARM:
-                       if (!sb->s_qcop->set_xstate)
-                               return -ENOSYS;
-                       break;
-               case Q_XGETQSTAT:
-                       if (!sb->s_qcop->get_xstate)
-                               return -ENOSYS;
-                       break;
-               case Q_XSETQLIM:
-                       if (!sb->s_qcop->set_xquota)
-                               return -ENOSYS;
-                       break;
-               case Q_XGETQUOTA:
-                       if (!sb->s_qcop->get_xquota)
-                               return -ENOSYS;
-                       break;
-               case Q_XQUOTASYNC:
-                       if (!sb->s_qcop->quota_sync)
-                               return -ENOSYS;
-                       break;
-               default:
-                       return -EINVAL;
-       }
-
-       /* Check privileges */
-       if (cmd == Q_XGETQUOTA) {
-               if (((type == XQM_USRQUOTA && current_euid() != id) ||
-                    (type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
-                    !capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-       } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-       }
-
-       return 0;
-}
-
-static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
-{
-       int error;
-
-       if (XQM_COMMAND(cmd))
-               error = xqm_quotactl_valid(sb, type, cmd, id);
-       else
-               error = generic_quotactl_valid(sb, type, cmd, id);
-       if (!error)
-               error = security_quotactl(cmd, type, id, sb);
-       return error;
-}
-
-static void quota_sync_sb(struct super_block *sb, int type)
-{
-       int cnt;
-
-       sb->s_qcop->quota_sync(sb, type);
-
-       if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE)
-               return;
-       /* This is not very clever (and fast) but currently I don't know about
-        * any other simple way of getting quota data to disk and we must get
-        * them there for userspace to be visible... */
-       if (sb->s_op->sync_fs)
-               sb->s_op->sync_fs(sb, 1);
-       sync_blockdev(sb->s_bdev);
-
-       /*
-        * Now when everything is written we can discard the pagecache so
-        * that userspace sees the changes.
-        */
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (type != -1 && cnt != type)
-                       continue;
-               if (!sb_has_quota_active(sb, cnt))
-                       continue;
-               mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
-               truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
-               mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
-       }
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-}
-
-void sync_dquots(struct super_block *sb, int type)
-{
-       int cnt;
-
-       if (sb) {
-               if (sb->s_qcop->quota_sync)
-                       quota_sync_sb(sb, type);
-               return;
-       }
-
-       spin_lock(&sb_lock);
-restart:
-       list_for_each_entry(sb, &super_blocks, s_list) {
-               /* This test just improves performance so it needn't be reliable... */
-               for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                       if (type != -1 && type != cnt)
-                               continue;
-                       if (!sb_has_quota_active(sb, cnt))
-                               continue;
-                       if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
-                           list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
-                               continue;
-                       break;
-               }
-               if (cnt == MAXQUOTAS)
-                       continue;
-               sb->s_count++;
-               spin_unlock(&sb_lock);
-               down_read(&sb->s_umount);
-               if (sb->s_root && sb->s_qcop->quota_sync)
-                       quota_sync_sb(sb, type);
-               up_read(&sb->s_umount);
-               spin_lock(&sb_lock);
-               if (__put_super_and_need_restart(sb))
-                       goto restart;
-       }
-       spin_unlock(&sb_lock);
-}
-
-/* Copy parameters and call proper function */
-static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
-{
-       int ret;
-
-       switch (cmd) {
-               case Q_QUOTAON: {
-                       char *pathname;
-
-                       if (IS_ERR(pathname = getname(addr)))
-                               return PTR_ERR(pathname);
-                       ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
-                       putname(pathname);
-                       return ret;
-               }
-               case Q_QUOTAOFF:
-                       return sb->s_qcop->quota_off(sb, type, 0);
-
-               case Q_GETFMT: {
-                       __u32 fmt;
-
-                       down_read(&sb_dqopt(sb)->dqptr_sem);
-                       if (!sb_has_quota_active(sb, type)) {
-                               up_read(&sb_dqopt(sb)->dqptr_sem);
-                               return -ESRCH;
-                       }
-                       fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
-                       up_read(&sb_dqopt(sb)->dqptr_sem);
-                       if (copy_to_user(addr, &fmt, sizeof(fmt)))
-                               return -EFAULT;
-                       return 0;
-               }
-               case Q_GETINFO: {
-                       struct if_dqinfo info;
-
-                       if ((ret = sb->s_qcop->get_info(sb, type, &info)))
-                               return ret;
-                       if (copy_to_user(addr, &info, sizeof(info)))
-                               return -EFAULT;
-                       return 0;
-               }
-               case Q_SETINFO: {
-                       struct if_dqinfo info;
-
-                       if (copy_from_user(&info, addr, sizeof(info)))
-                               return -EFAULT;
-                       return sb->s_qcop->set_info(sb, type, &info);
-               }
-               case Q_GETQUOTA: {
-                       struct if_dqblk idq;
-
-                       if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
-                               return ret;
-                       if (copy_to_user(addr, &idq, sizeof(idq)))
-                               return -EFAULT;
-                       return 0;
-               }
-               case Q_SETQUOTA: {
-                       struct if_dqblk idq;
-
-                       if (copy_from_user(&idq, addr, sizeof(idq)))
-                               return -EFAULT;
-                       return sb->s_qcop->set_dqblk(sb, type, id, &idq);
-               }
-               case Q_SYNC:
-                       sync_dquots(sb, type);
-                       return 0;
-
-               case Q_XQUOTAON:
-               case Q_XQUOTAOFF:
-               case Q_XQUOTARM: {
-                       __u32 flags;
-
-                       if (copy_from_user(&flags, addr, sizeof(flags)))
-                               return -EFAULT;
-                       return sb->s_qcop->set_xstate(sb, flags, cmd);
-               }
-               case Q_XGETQSTAT: {
-                       struct fs_quota_stat fqs;
-               
-                       if ((ret = sb->s_qcop->get_xstate(sb, &fqs)))
-                               return ret;
-                       if (copy_to_user(addr, &fqs, sizeof(fqs)))
-                               return -EFAULT;
-                       return 0;
-               }
-               case Q_XSETQLIM: {
-                       struct fs_disk_quota fdq;
-
-                       if (copy_from_user(&fdq, addr, sizeof(fdq)))
-                               return -EFAULT;
-                      return sb->s_qcop->set_xquota(sb, type, id, &fdq);
-               }
-               case Q_XGETQUOTA: {
-                       struct fs_disk_quota fdq;
-
-                       if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
-                               return ret;
-                       if (copy_to_user(addr, &fdq, sizeof(fdq)))
-                               return -EFAULT;
-                       return 0;
-               }
-               case Q_XQUOTASYNC:
-                       return sb->s_qcop->quota_sync(sb, type);
-               /* We never reach here unless validity check is broken */
-               default:
-                       BUG();
-       }
-       return 0;
-}
-
-/*
- * look up a superblock on which quota ops will be performed
- * - use the name of a block device to find the superblock thereon
- */
-static inline struct super_block *quotactl_block(const char __user *special)
-{
-#ifdef CONFIG_BLOCK
-       struct block_device *bdev;
-       struct super_block *sb;
-       char *tmp = getname(special);
-
-       if (IS_ERR(tmp))
-               return ERR_CAST(tmp);
-       bdev = lookup_bdev(tmp);
-       putname(tmp);
-       if (IS_ERR(bdev))
-               return ERR_CAST(bdev);
-       sb = get_super(bdev);
-       bdput(bdev);
-       if (!sb)
-               return ERR_PTR(-ENODEV);
-
-       return sb;
-#else
-       return ERR_PTR(-ENODEV);
-#endif
-}
-
-/*
- * This is the system call interface. This communicates with
- * the user-level programs. Currently this only supports diskquota
- * calls. Maybe we need to add the process quotas etc. in the future,
- * but we probably should use rlimits for that.
- */
-SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
-               qid_t, id, void __user *, addr)
-{
-       uint cmds, type;
-       struct super_block *sb = NULL;
-       int ret;
-
-       cmds = cmd >> SUBCMDSHIFT;
-       type = cmd & SUBCMDMASK;
-
-       if (cmds != Q_SYNC || special) {
-               sb = quotactl_block(special);
-               if (IS_ERR(sb))
-                       return PTR_ERR(sb);
-       }
-
-       ret = check_quotactl_valid(sb, type, cmds, id);
-       if (ret >= 0)
-               ret = do_quotactl(sb, type, cmds, id, addr);
-       if (sb)
-               drop_super(sb);
-
-       return ret;
-}
-
-#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT)
-/*
- * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64)
- * and is necessary due to alignment problems.
- */
-struct compat_if_dqblk {
-       compat_u64 dqb_bhardlimit;
-       compat_u64 dqb_bsoftlimit;
-       compat_u64 dqb_curspace;
-       compat_u64 dqb_ihardlimit;
-       compat_u64 dqb_isoftlimit;
-       compat_u64 dqb_curinodes;
-       compat_u64 dqb_btime;
-       compat_u64 dqb_itime;
-       compat_uint_t dqb_valid;
-};
-
-/* XFS structures */
-struct compat_fs_qfilestat {
-       compat_u64 dqb_bhardlimit;
-       compat_u64 qfs_nblks;
-       compat_uint_t qfs_nextents;
-};
-
-struct compat_fs_quota_stat {
-       __s8            qs_version;
-       __u16           qs_flags;
-       __s8            qs_pad;
-       struct compat_fs_qfilestat      qs_uquota;
-       struct compat_fs_qfilestat      qs_gquota;
-       compat_uint_t   qs_incoredqs;
-       compat_int_t    qs_btimelimit;
-       compat_int_t    qs_itimelimit;
-       compat_int_t    qs_rtbtimelimit;
-       __u16           qs_bwarnlimit;
-       __u16           qs_iwarnlimit;
-};
-
-asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
-                                               qid_t id, void __user *addr)
-{
-       unsigned int cmds;
-       struct if_dqblk __user *dqblk;
-       struct compat_if_dqblk __user *compat_dqblk;
-       struct fs_quota_stat __user *fsqstat;
-       struct compat_fs_quota_stat __user *compat_fsqstat;
-       compat_uint_t data;
-       u16 xdata;
-       long ret;
-
-       cmds = cmd >> SUBCMDSHIFT;
-
-       switch (cmds) {
-       case Q_GETQUOTA:
-               dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
-               compat_dqblk = addr;
-               ret = sys_quotactl(cmd, special, id, dqblk);
-               if (ret)
-                       break;
-               if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) ||
-                       get_user(data, &dqblk->dqb_valid) ||
-                       put_user(data, &compat_dqblk->dqb_valid))
-                       ret = -EFAULT;
-               break;
-       case Q_SETQUOTA:
-               dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
-               compat_dqblk = addr;
-               ret = -EFAULT;
-               if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) ||
-                       get_user(data, &compat_dqblk->dqb_valid) ||
-                       put_user(data, &dqblk->dqb_valid))
-                       break;
-               ret = sys_quotactl(cmd, special, id, dqblk);
-               break;
-       case Q_XGETQSTAT:
-               fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat));
-               compat_fsqstat = addr;
-               ret = sys_quotactl(cmd, special, id, fsqstat);
-               if (ret)
-                       break;
-               ret = -EFAULT;
-               /* Copying qs_version, qs_flags, qs_pad */
-               if (copy_in_user(compat_fsqstat, fsqstat,
-                       offsetof(struct compat_fs_quota_stat, qs_uquota)))
-                       break;
-               /* Copying qs_uquota */
-               if (copy_in_user(&compat_fsqstat->qs_uquota,
-                       &fsqstat->qs_uquota,
-                       sizeof(compat_fsqstat->qs_uquota)) ||
-                       get_user(data, &fsqstat->qs_uquota.qfs_nextents) ||
-                       put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents))
-                       break;
-               /* Copying qs_gquota */
-               if (copy_in_user(&compat_fsqstat->qs_gquota,
-                       &fsqstat->qs_gquota,
-                       sizeof(compat_fsqstat->qs_gquota)) ||
-                       get_user(data, &fsqstat->qs_gquota.qfs_nextents) ||
-                       put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents))
-                       break;
-               /* Copying the rest */
-               if (copy_in_user(&compat_fsqstat->qs_incoredqs,
-                       &fsqstat->qs_incoredqs,
-                       sizeof(struct compat_fs_quota_stat) -
-                       offsetof(struct compat_fs_quota_stat, qs_incoredqs)) ||
-                       get_user(xdata, &fsqstat->qs_iwarnlimit) ||
-                       put_user(xdata, &compat_fsqstat->qs_iwarnlimit))
-                       break;
-               ret = 0;
-               break;
-       default:
-               ret = sys_quotactl(cmd, special, id, addr);
-       }
-       return ret;
-}
-#endif
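The deleted fs/quota.c is the kernel side of the quotactl(2) interface its header comment describes (the code itself moves under fs/quota/). For reference, the userspace side of a Q_GETQUOTA request looks roughly like this; the device path and uid are placeholders, and quota must already be enabled on that filesystem:

/* Userspace example: query one user's quota usage via quotactl(2). */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(void)
{
        struct dqblk dq;
        uid_t uid = 1000;                       /* placeholder uid */

        if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1",
                     uid, (caddr_t)&dq) != 0) {
                fprintf(stderr, "quotactl: %s\n", strerror(errno));
                return 1;
        }
        printf("space used: %llu bytes, inodes used: %llu\n",
               (unsigned long long)dq.dqb_curspace,
               (unsigned long long)dq.dqb_curinodes);
        return 0;
}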
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644 (file)
index 0000000..8047e01
--- /dev/null
@@ -0,0 +1,59 @@
+#
+#  Quota configuration
+#
+
+config QUOTA
+       bool "Quota support"
+       help
+         If you say Y here, you will be able to set per user limits for disk
+         usage (also called disk quotas). Currently, it works for the
+         ext2, ext3, and reiserfs file system. ext3 also supports journalled
+         quotas for which you don't need to run quotacheck(8) after an unclean
+         shutdown.
+         For further details, read the Quota mini-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>, or the documentation provided
+         with the quota tools. Probably the quota support is only useful for
+         multi user systems. If unsure, say N.
+
+config QUOTA_NETLINK_INTERFACE
+       bool "Report quota messages through netlink interface"
+       depends on QUOTA && NET
+       help
+         If you say Y here, quota warnings (about exceeding softlimit, reaching
+         hardlimit, etc.) will be reported through the netlink interface. If unsure,
+         say Y.
+
+config PRINT_QUOTA_WARNING
+       bool "Print quota warnings to console (OBSOLETE)"
+       depends on QUOTA
+       default y
+       help
+         If you say Y here, quota warnings (about exceeding softlimit, reaching
+         hardlimit, etc.) will be printed to the process' controlling terminal.
+         Note that this behavior is currently deprecated and may go away in
+         the future. Please use notification via the netlink socket instead.
+
+# Generic support for tree structured quota files. Selected when needed.
+config QUOTA_TREE
+       tristate
+
+config QFMT_V1
+       tristate "Old quota format support"
+       depends on QUOTA
+       help
+         This quota format was (and still is) used by kernels earlier than
+         2.4.22. If you have quota working and don't want to convert to the
+         new quota format, say Y here.
+
+config QFMT_V2
+       tristate "Quota format v2 support"
+       depends on QUOTA
+       select QUOTA_TREE
+       help
+         This quota format allows using quotas with 32-bit UIDs/GIDs. If you
+         need this functionality, say Y here.
+
+config QUOTACTL
+       bool
+       depends on XFS_QUOTA || QUOTA
+       default y
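For completeness, a .config fragment that exercises the options above (the v2 quota format with netlink warnings) might look like the following; QUOTA_TREE and QUOTACTL are selected/derived automatically and are listed only for clarity:

CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QUOTA_TREE=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=y
CONFIG_QUOTACTL=y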
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644 (file)
index 0000000..385a083
--- /dev/null
@@ -0,0 +1,14 @@
+#
+# Makefile for the Linux filesystems.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-y :=
+
+obj-$(CONFIG_QUOTA)            += dquot.o
+obj-$(CONFIG_QFMT_V1)          += quota_v1.o
+obj-$(CONFIG_QFMT_V2)          += quota_v2.o
+obj-$(CONFIG_QUOTA_TREE)       += quota_tree.o
+obj-$(CONFIG_QUOTACTL)         += quota.o
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
new file mode 100644 (file)
index 0000000..eb93801
--- /dev/null
@@ -0,0 +1,2611 @@
+/*
+ * Implementation of the diskquota system for the LINUX operating system. QUOTA
+ * is implemented using the BSD system call interface as the means of
+ * communication with the user level. This file contains the generic routines
+ * called by the different filesystems on allocation of an inode or block.
+ * These routines take care of the administration needed to have a consistent
+ * diskquota tracking system. The ideas of both user and group quotas are based
+ * on the Melbourne quota system as used on BSD derived systems. The internal
+ * implementation is based on one of the several variants of the LINUX
+ * inode-subsystem with added complexity of the diskquota system.
+ * 
+ * Author:     Marco van Wieringen <mvw@planets.elm.net>
+ *
+ * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
+ *
+ *             Revised list management to avoid races
+ *             -- Bill Hawes, <whawes@star.net>, 9/98
+ *
+ *             Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
+ *             As a consequence, the locking was moved from dquot_decr_...(),
+ *             dquot_incr_...() to calling functions.
+ *             invalidate_dquots() now writes modified dquots.
+ *             Serialized quota_off() and quota_on() for mount point.
+ *             Fixed a few bugs in grow_dquots().
+ *             Fixed deadlock in write_dquot() - we no longer account quotas on
+ *             quota files
+ *             remove_dquot_ref() moved to inode.c - it now traverses through inodes
+ *             add_dquot_ref() restarts after blocking
+ *             Added check for bogus uid and fixed check for group in quotactl.
+ *             Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
+ *
+ *             Used struct list_head instead of own list struct
+ *             Invalidation of referenced dquots is no longer possible
+ *             Improved free_dquots list management
+ *             Quota and i_blocks are now updated in one place to avoid races
+ *             Warnings are now delayed so we won't block in critical section
+ *             Write updated not to require dquot lock
+ *             Jan Kara, <jack@suse.cz>, 9/2000
+ *
+ *             Added dynamic quota structure allocation
+ *             Jan Kara <jack@suse.cz> 12/2000
+ *
+ *             Rewritten quota interface. Implemented new quota format and
+ *             formats registering.
+ *             Jan Kara, <jack@suse.cz>, 2001,2002
+ *
+ *             New SMP locking.
+ *             Jan Kara, <jack@suse.cz>, 10/2002
+ *
+ *             Added journalled quota support, fix lock inversion problems
+ *             Jan Kara, <jack@suse.cz>, 2003,2004
+ *
+ * (C) Copyright 1994 - 1997 Marco van Wieringen 
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/security.h>
+#include <linux/kmod.h>
+#include <linux/namei.h>
+#include <linux/buffer_head.h>
+#include <linux/capability.h>
+#include <linux/quotaops.h>
+#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#endif
+
+#include <asm/uaccess.h>
+
+#define __DQUOT_PARANOIA
+
+/*
+ * There are three quota SMP locks. dq_list_lock protects all lists with quotas
+ * and quota formats, and the dqstats structure containing statistics about the
+ * lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures,
+ * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
+ * The i_blocks and i_bytes updates themselves are guarded by i_lock acquired
+ * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
+ * modifications of quota state (on quotaon and quotaoff) and readers who care
+ * about latest values take it as well.
+ *
+ * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
+ *   dq_list_lock > dq_state_lock
+ *
+ * Note that some things (e.g. the sb pointer, type, id) don't change during
+ * the life of the dquot structure and so needn't be protected by a lock.
+ *
+ * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
+ * the operation just reads pointers from the inode (or does not use them at
+ * all), the read lock is enough. If pointers are altered, the function must
+ * hold the write lock (these locking rules also apply to the S_NOQUOTA flag in
+ * the inode - note that for altering the flag, i_mutex is also needed).
+ *
+ * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
+ * from inodes (dquot_alloc_space() and such don't check the dq_lock).
+ * Currently a dquot is locked only when it is being read into memory (or space
+ * for it is being allocated) on the first dqget() and when it is being released
+ * on the last dqput(). The allocation and release operations are serialized by
+ * the dq_lock and by checking the use count in dquot_release().  Write
+ * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
+ * spinlock to internal buffers before writing.
+ *
+ * Lock ordering (including related VFS locks) is the following:
+ *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ *   dqio_mutex
+ * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
+ * dqptr_sem. But the filesystem has to take into account that functions such
+ * as dquot_alloc_space() acquire dqptr_sem and usually have to be called from
+ * inside a transaction to keep filesystem consistency after a crash. Also,
+ * filesystems usually want to do some IO on the dquot from ->mark_dirty, which
+ * is called with dqptr_sem held.
+ * i_mutex on quota files is special (it's below dqio_mutex)
+ */
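+
+/*
+ * Illustrative sketch of the spinlock ordering documented above (not a real
+ * code path, it only shows which lock may nest inside which; dqget() really
+ * does nest dq_state_lock inside dq_list_lock):
+ *
+ *     spin_lock(&dq_data_lock);
+ *     spin_lock(&dq_list_lock);
+ *     spin_lock(&dq_state_lock);
+ *     ...
+ *     spin_unlock(&dq_state_lock);
+ *     spin_unlock(&dq_list_lock);
+ *     spin_unlock(&dq_data_lock);
+ */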
+
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
+EXPORT_SYMBOL(dq_data_lock);
+
+static char *quotatypes[] = INITQFNAMES;
+static struct quota_format_type *quota_formats;        /* List of registered formats */
+static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
+
+/* SLAB cache for dquot structures */
+static struct kmem_cache *dquot_cachep;
+
+int register_quota_format(struct quota_format_type *fmt)
+{
+       spin_lock(&dq_list_lock);
+       fmt->qf_next = quota_formats;
+       quota_formats = fmt;
+       spin_unlock(&dq_list_lock);
+       return 0;
+}
+EXPORT_SYMBOL(register_quota_format);
+
+void unregister_quota_format(struct quota_format_type *fmt)
+{
+       struct quota_format_type **actqf;
+
+       spin_lock(&dq_list_lock);
+       for (actqf = &quota_formats; *actqf && *actqf != fmt;
+            actqf = &(*actqf)->qf_next)
+               ;
+       if (*actqf)
+               *actqf = (*actqf)->qf_next;
+       spin_unlock(&dq_list_lock);
+}
+EXPORT_SYMBOL(unregister_quota_format);
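+
+/*
+ * Illustrative registration by a quota format module (a minimal sketch along
+ * the lines of quota_v1/quota_v2; the my_* names are hypothetical):
+ *
+ *     static struct quota_format_type my_format = {
+ *             .qf_fmt_id = QFMT_VFS_V0,
+ *             .qf_ops = &my_format_ops,
+ *             .qf_owner = THIS_MODULE,
+ *     };
+ *
+ *     static int __init my_format_init(void)
+ *     {
+ *             return register_quota_format(&my_format);
+ *     }
+ *
+ *     static void __exit my_format_exit(void)
+ *     {
+ *             unregister_quota_format(&my_format);
+ *     }
+ */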
+
+static struct quota_format_type *find_quota_format(int id)
+{
+       struct quota_format_type *actqf;
+
+       spin_lock(&dq_list_lock);
+       for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+            actqf = actqf->qf_next)
+               ;
+       if (!actqf || !try_module_get(actqf->qf_owner)) {
+               int qm;
+
+               spin_unlock(&dq_list_lock);
+               
+               for (qm = 0; module_names[qm].qm_fmt_id &&
+                            module_names[qm].qm_fmt_id != id; qm++)
+                       ;
+               if (!module_names[qm].qm_fmt_id ||
+                   request_module(module_names[qm].qm_mod_name))
+                       return NULL;
+
+               spin_lock(&dq_list_lock);
+               for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+                    actqf = actqf->qf_next)
+                       ;
+               if (actqf && !try_module_get(actqf->qf_owner))
+                       actqf = NULL;
+       }
+       spin_unlock(&dq_list_lock);
+       return actqf;
+}
+
+static void put_quota_format(struct quota_format_type *fmt)
+{
+       module_put(fmt->qf_owner);
+}
+
+/*
+ * Dquot List Management:
+ * The quota code uses three lists for dquot management: the inuse_list,
+ * free_dquots, and dquot_hash[] array. A single dquot structure may be
+ * on all three lists, depending on its current state.
+ *
+ * All dquots are placed at the end of inuse_list when first created, and this
+ * list is used for the invalidate operation, which must look at every dquot.
+ *
+ * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
+ * and this list is searched whenever we need an available dquot.  Dquots are
+ * removed from the list as soon as they are used again, and
+ * dqstats.free_dquots gives the number of dquots on the list. When a
+ * dquot is invalidated it is completely released from memory.
+ *
+ * Dquots with a specific identity (device, type and id) are placed on
+ * one of the dquot_hash[] hash chains. This provides an efficient search
+ * mechanism to locate a specific dquot.
+ */
+
+static LIST_HEAD(inuse_list);
+static LIST_HEAD(free_dquots);
+static unsigned int dq_hash_bits, dq_hash_mask;
+static struct hlist_head *dquot_hash;
+
+struct dqstats dqstats;
+EXPORT_SYMBOL(dqstats);
+
+static inline unsigned int
+hashfn(const struct super_block *sb, unsigned int id, int type)
+{
+       unsigned long tmp;
+
+       tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
+       return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
+}
+
+/*
+ * The following list functions expect dq_list_lock to be held
+ */
+static inline void insert_dquot_hash(struct dquot *dquot)
+{
+       struct hlist_head *head;
+       head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+       hlist_add_head(&dquot->dq_hash, head);
+}
+
+static inline void remove_dquot_hash(struct dquot *dquot)
+{
+       hlist_del_init(&dquot->dq_hash);
+}
+
+static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
+                               unsigned int id, int type)
+{
+       struct hlist_node *node;
+       struct dquot *dquot;
+
+       hlist_for_each (node, dquot_hash+hashent) {
+               dquot = hlist_entry(node, struct dquot, dq_hash);
+               if (dquot->dq_sb == sb && dquot->dq_id == id &&
+                   dquot->dq_type == type)
+                       return dquot;
+       }
+       return NULL;
+}
+
+/* Add a dquot to the tail of the free list */
+static inline void put_dquot_last(struct dquot *dquot)
+{
+       list_add_tail(&dquot->dq_free, &free_dquots);
+       dqstats.free_dquots++;
+}
+
+static inline void remove_free_dquot(struct dquot *dquot)
+{
+       if (list_empty(&dquot->dq_free))
+               return;
+       list_del_init(&dquot->dq_free);
+       dqstats.free_dquots--;
+}
+
+static inline void put_inuse(struct dquot *dquot)
+{
+       /* We add to the back of the inuse list so we don't have to restart
+        * when we block while traversing this list */
+       list_add_tail(&dquot->dq_inuse, &inuse_list);
+       dqstats.allocated_dquots++;
+}
+
+static inline void remove_inuse(struct dquot *dquot)
+{
+       dqstats.allocated_dquots--;
+       list_del(&dquot->dq_inuse);
+}
+/*
+ * End of list functions needing dq_list_lock
+ */
+
+static void wait_on_dquot(struct dquot *dquot)
+{
+       mutex_lock(&dquot->dq_lock);
+       mutex_unlock(&dquot->dq_lock);
+}
+
+static inline int dquot_dirty(struct dquot *dquot)
+{
+       return test_bit(DQ_MOD_B, &dquot->dq_flags);
+}
+
+static inline int mark_dquot_dirty(struct dquot *dquot)
+{
+       return dquot->dq_sb->dq_op->mark_dirty(dquot);
+}
+
+int dquot_mark_dquot_dirty(struct dquot *dquot)
+{
+       spin_lock(&dq_list_lock);
+       if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
+               list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
+                               info[dquot->dq_type].dqi_dirty_list);
+       spin_unlock(&dq_list_lock);
+       return 0;
+}
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
+
+/* This function needs dq_list_lock */
+static inline int clear_dquot_dirty(struct dquot *dquot)
+{
+       if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
+               return 0;
+       list_del_init(&dquot->dq_dirty);
+       return 1;
+}
+
+void mark_info_dirty(struct super_block *sb, int type)
+{
+       set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
+}
+EXPORT_SYMBOL(mark_info_dirty);
+
+/*
+ *     Read dquot from disk and alloc space for it
+ */
+
+int dquot_acquire(struct dquot *dquot)
+{
+       int ret = 0, ret2 = 0;
+       struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+       mutex_lock(&dquot->dq_lock);
+       mutex_lock(&dqopt->dqio_mutex);
+       if (!test_bit(DQ_READ_B, &dquot->dq_flags))
+               ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
+       if (ret < 0)
+               goto out_iolock;
+       set_bit(DQ_READ_B, &dquot->dq_flags);
+       /* Instantiate dquot if needed */
+       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
+               ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+               /* Write the info if needed */
+               if (info_dirty(&dqopt->info[dquot->dq_type])) {
+                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+                                               dquot->dq_sb, dquot->dq_type);
+               }
+               if (ret < 0)
+                       goto out_iolock;
+               if (ret2 < 0) {
+                       ret = ret2;
+                       goto out_iolock;
+               }
+       }
+       set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+out_iolock:
+       mutex_unlock(&dqopt->dqio_mutex);
+       mutex_unlock(&dquot->dq_lock);
+       return ret;
+}
+EXPORT_SYMBOL(dquot_acquire);
+
+/*
+ *     Write dquot to disk
+ */
+int dquot_commit(struct dquot *dquot)
+{
+       int ret = 0, ret2 = 0;
+       struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+       mutex_lock(&dqopt->dqio_mutex);
+       spin_lock(&dq_list_lock);
+       if (!clear_dquot_dirty(dquot)) {
+               spin_unlock(&dq_list_lock);
+               goto out_sem;
+       }
+       spin_unlock(&dq_list_lock);
+       /* A dquot can be inactive only if there was an error during read/init,
+        * so we had better not write it */
+       if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+               ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+               if (info_dirty(&dqopt->info[dquot->dq_type])) {
+                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+                                               dquot->dq_sb, dquot->dq_type);
+               }
+               if (ret >= 0)
+                       ret = ret2;
+       }
+out_sem:
+       mutex_unlock(&dqopt->dqio_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(dquot_commit);
+
+/*
+ *     Release dquot
+ */
+int dquot_release(struct dquot *dquot)
+{
+       int ret = 0, ret2 = 0;
+       struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+       mutex_lock(&dquot->dq_lock);
+       /* Check that we are not racing with some other dqget() */
+       if (atomic_read(&dquot->dq_count) > 1)
+               goto out_dqlock;
+       mutex_lock(&dqopt->dqio_mutex);
+       if (dqopt->ops[dquot->dq_type]->release_dqblk) {
+               ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
+               /* Write the info */
+               if (info_dirty(&dqopt->info[dquot->dq_type])) {
+                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+                                               dquot->dq_sb, dquot->dq_type);
+               }
+               if (ret >= 0)
+                       ret = ret2;
+       }
+       clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+       mutex_unlock(&dqopt->dqio_mutex);
+out_dqlock:
+       mutex_unlock(&dquot->dq_lock);
+       return ret;
+}
+EXPORT_SYMBOL(dquot_release);
+
+void dquot_destroy(struct dquot *dquot)
+{
+       kmem_cache_free(dquot_cachep, dquot);
+}
+EXPORT_SYMBOL(dquot_destroy);
+
+static inline void do_destroy_dquot(struct dquot *dquot)
+{
+       dquot->dq_sb->dq_op->destroy_dquot(dquot);
+}
+
+/* Invalidate all dquots on the list. Note that this function is called after
+ * quota has been disabled and the pointers from inodes removed, so there
+ * cannot be new quota users. There can still be some users of quotas due to
+ * inodes being just deleted or pruned by prune_icache() (those are not
+ * attached to any list) or a parallel quotactl call. We have to wait for such
+ * users.
+ */
+static void invalidate_dquots(struct super_block *sb, int type)
+{
+       struct dquot *dquot, *tmp;
+
+restart:
+       spin_lock(&dq_list_lock);
+       list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
+               if (dquot->dq_sb != sb)
+                       continue;
+               if (dquot->dq_type != type)
+                       continue;
+               /* Wait for dquot users */
+               if (atomic_read(&dquot->dq_count)) {
+                       DEFINE_WAIT(wait);
+
+                       atomic_inc(&dquot->dq_count);
+                       prepare_to_wait(&dquot->dq_wait_unused, &wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&dq_list_lock);
+                       /* Once dqput() wakes us up, we know it's time to free
+                        * the dquot.
+                        * IMPORTANT: we rely on the fact that there is always
+                        * at most one process waiting for dquot to free.
+                        * Otherwise dq_count would be > 1 and we would never
+                        * wake up.
+                        */
+                       if (atomic_read(&dquot->dq_count) > 1)
+                               schedule();
+                       finish_wait(&dquot->dq_wait_unused, &wait);
+                       dqput(dquot);
+                       /* At this moment the dquot need not exist (it could
+                        * have been reclaimed by prune_dqcache()). Hence we
+                        * must restart. */
+                       goto restart;
+               }
+               /*
+                * Quota now has no users and it has been written on last
+                * dqput()
+                */
+               remove_dquot_hash(dquot);
+               remove_free_dquot(dquot);
+               remove_inuse(dquot);
+               do_destroy_dquot(dquot);
+       }
+       spin_unlock(&dq_list_lock);
+}
+
+/* Call callback for every active dquot on given filesystem */
+int dquot_scan_active(struct super_block *sb,
+                     int (*fn)(struct dquot *dquot, unsigned long priv),
+                     unsigned long priv)
+{
+       struct dquot *dquot, *old_dquot = NULL;
+       int ret = 0;
+
+       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+       spin_lock(&dq_list_lock);
+       list_for_each_entry(dquot, &inuse_list, dq_inuse) {
+               if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+                       continue;
+               if (dquot->dq_sb != sb)
+                       continue;
+               /* Now we have an active dquot so we can just increase the use count */
+               atomic_inc(&dquot->dq_count);
+               dqstats.lookups++;
+               spin_unlock(&dq_list_lock);
+               dqput(old_dquot);
+               old_dquot = dquot;
+               ret = fn(dquot, priv);
+               if (ret < 0)
+                       goto out;
+               spin_lock(&dq_list_lock);
+               /* We are safe to continue now because our dquot could not
+                * be moved out of the inuse list while we hold the reference */
+       }
+       spin_unlock(&dq_list_lock);
+out:
+       dqput(old_dquot);
+       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(dquot_scan_active);
+
+int vfs_quota_sync(struct super_block *sb, int type)
+{
+       struct list_head *dirty;
+       struct dquot *dquot;
+       struct quota_info *dqopt = sb_dqopt(sb);
+       int cnt;
+
+       mutex_lock(&dqopt->dqonoff_mutex);
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (type != -1 && cnt != type)
+                       continue;
+               if (!sb_has_quota_active(sb, cnt))
+                       continue;
+               spin_lock(&dq_list_lock);
+               dirty = &dqopt->info[cnt].dqi_dirty_list;
+               while (!list_empty(dirty)) {
+                       dquot = list_first_entry(dirty, struct dquot,
+                                                dq_dirty);
+                       /* Only a bad dquot can be both dirty and inactive... */
+                       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+                               clear_dquot_dirty(dquot);
+                               continue;
+                       }
+                       /* Now we have an active dquot to which someone is
+                        * holding a reference, so we can safely just increase
+                        * the use count */
+                       atomic_inc(&dquot->dq_count);
+                       dqstats.lookups++;
+                       spin_unlock(&dq_list_lock);
+                       sb->dq_op->write_dquot(dquot);
+                       dqput(dquot);
+                       spin_lock(&dq_list_lock);
+               }
+               spin_unlock(&dq_list_lock);
+       }
+
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
+                   && info_dirty(&dqopt->info[cnt]))
+                       sb->dq_op->write_info(sb, cnt);
+       spin_lock(&dq_list_lock);
+       dqstats.syncs++;
+       spin_unlock(&dq_list_lock);
+       mutex_unlock(&dqopt->dqonoff_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL(vfs_quota_sync);
+
+/* Free unused dquots from cache */
+static void prune_dqcache(int count)
+{
+       struct list_head *head;
+       struct dquot *dquot;
+
+       head = free_dquots.prev;
+       while (head != &free_dquots && count) {
+               dquot = list_entry(head, struct dquot, dq_free);
+               remove_dquot_hash(dquot);
+               remove_free_dquot(dquot);
+               remove_inuse(dquot);
+               do_destroy_dquot(dquot);
+               count--;
+               head = free_dquots.prev;
+       }
+}
+
+/*
+ * This is called from kswapd when we think we need some
+ * more memory
+ */
+
+static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
+{
+       if (nr) {
+               spin_lock(&dq_list_lock);
+               prune_dqcache(nr);
+               spin_unlock(&dq_list_lock);
+       }
+       return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
+}
+
+static struct shrinker dqcache_shrinker = {
+       .shrink = shrink_dqcache_memory,
+       .seeks = DEFAULT_SEEKS,
+};
+
+/*
+ * Put reference to dquot
+ * NOTE: If you change this function please check whether dqput_blocks() works right...
+ */
+void dqput(struct dquot *dquot)
+{
+       int ret;
+
+       if (!dquot)
+               return;
+#ifdef __DQUOT_PARANOIA
+       if (!atomic_read(&dquot->dq_count)) {
+               printk("VFS: dqput: trying to free free dquot\n");
+               printk("VFS: device %s, dquot of %s %d\n",
+                       dquot->dq_sb->s_id,
+                       quotatypes[dquot->dq_type],
+                       dquot->dq_id);
+               BUG();
+       }
+#endif
+       
+       spin_lock(&dq_list_lock);
+       dqstats.drops++;
+       spin_unlock(&dq_list_lock);
+we_slept:
+       spin_lock(&dq_list_lock);
+       if (atomic_read(&dquot->dq_count) > 1) {
+               /* We have more than one user... nothing to do */
+               atomic_dec(&dquot->dq_count);
+               /* Releasing dquot during quotaoff phase? */
+               if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
+                   atomic_read(&dquot->dq_count) == 1)
+                       wake_up(&dquot->dq_wait_unused);
+               spin_unlock(&dq_list_lock);
+               return;
+       }
+       /* Need to release dquot? */
+       if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
+               spin_unlock(&dq_list_lock);
+               /* Commit dquot before releasing */
+               ret = dquot->dq_sb->dq_op->write_dquot(dquot);
+               if (ret < 0) {
+                       printk(KERN_ERR "VFS: cannot write quota structure on "
+                               "device %s (error %d). Quota may get out of "
+                               "sync!\n", dquot->dq_sb->s_id, ret);
+                       /*
+                        * We clear the dirty bit anyway, so that we avoid an
+                        * infinite loop here
+                        */
+                       spin_lock(&dq_list_lock);
+                       clear_dquot_dirty(dquot);
+                       spin_unlock(&dq_list_lock);
+               }
+               goto we_slept;
+       }
+       /* Clear flag in case dquot was inactive (something bad happened) */
+       clear_dquot_dirty(dquot);
+       if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+               spin_unlock(&dq_list_lock);
+               dquot->dq_sb->dq_op->release_dquot(dquot);
+               goto we_slept;
+       }
+       atomic_dec(&dquot->dq_count);
+#ifdef __DQUOT_PARANOIA
+       /* sanity check */
+       BUG_ON(!list_empty(&dquot->dq_free));
+#endif
+       put_dquot_last(dquot);
+       spin_unlock(&dq_list_lock);
+}
+EXPORT_SYMBOL(dqput);
+
+struct dquot *dquot_alloc(struct super_block *sb, int type)
+{
+       return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
+}
+EXPORT_SYMBOL(dquot_alloc);
+
+static struct dquot *get_empty_dquot(struct super_block *sb, int type)
+{
+       struct dquot *dquot;
+
+       dquot = sb->dq_op->alloc_dquot(sb, type);
+       if(!dquot)
+               return NULL;
+
+       mutex_init(&dquot->dq_lock);
+       INIT_LIST_HEAD(&dquot->dq_free);
+       INIT_LIST_HEAD(&dquot->dq_inuse);
+       INIT_HLIST_NODE(&dquot->dq_hash);
+       INIT_LIST_HEAD(&dquot->dq_dirty);
+       init_waitqueue_head(&dquot->dq_wait_unused);
+       dquot->dq_sb = sb;
+       dquot->dq_type = type;
+       atomic_set(&dquot->dq_count, 1);
+
+       return dquot;
+}
+
+/*
+ * Get reference to dquot
+ *
+ * Locking is slightly tricky here. We are guarded from parallel quotaoff()
+ * destroying our dquot by:
+ *   a) checking for quota flags under dq_list_lock and
+ *   b) getting a reference to dquot before we release dq_list_lock
+ */
+struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
+{
+       unsigned int hashent = hashfn(sb, id, type);
+       struct dquot *dquot = NULL, *empty = NULL;
+
+        if (!sb_has_quota_active(sb, type))
+               return NULL;
+we_slept:
+       spin_lock(&dq_list_lock);
+       spin_lock(&dq_state_lock);
+       if (!sb_has_quota_active(sb, type)) {
+               spin_unlock(&dq_state_lock);
+               spin_unlock(&dq_list_lock);
+               goto out;
+       }
+       spin_unlock(&dq_state_lock);
+
+       dquot = find_dquot(hashent, sb, id, type);
+       if (!dquot) {
+               if (!empty) {
+                       spin_unlock(&dq_list_lock);
+                       empty = get_empty_dquot(sb, type);
+                       if (!empty)
+                               schedule();     /* Try to wait for a moment... */
+                       goto we_slept;
+               }
+               dquot = empty;
+               empty = NULL;
+               dquot->dq_id = id;
+               /* all dquots go on the inuse_list */
+               put_inuse(dquot);
+               /* hash it first so it can be found */
+               insert_dquot_hash(dquot);
+               dqstats.lookups++;
+               spin_unlock(&dq_list_lock);
+       } else {
+               if (!atomic_read(&dquot->dq_count))
+                       remove_free_dquot(dquot);
+               atomic_inc(&dquot->dq_count);
+               dqstats.cache_hits++;
+               dqstats.lookups++;
+               spin_unlock(&dq_list_lock);
+       }
+       /* Wait for dq_lock - after this we know that either dquot_release() has
+        * already finished or it will be canceled due to the dq_count > 1 test */
+       wait_on_dquot(dquot);
+       /* Read the dquot / allocate space in quota file */
+       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
+           sb->dq_op->acquire_dquot(dquot) < 0) {
+               dqput(dquot);
+               dquot = NULL;
+               goto out;
+       }
+#ifdef __DQUOT_PARANOIA
+       BUG_ON(!dquot->dq_sb);  /* Has somebody invalidated entry under us? */
+#endif
+out:
+       if (empty)
+               do_destroy_dquot(empty);
+
+       return dquot;
+}
+EXPORT_SYMBOL(dqget);
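+
+/*
+ * Typical use of dqget()/dqput() (a sketch): take a reference for a given id
+ * and type and drop it when done; a NULL return means quota is not active for
+ * this superblock and type, or the dquot could not be read. Per the locking
+ * rules above, dq_dqb is examined under dq_data_lock.
+ *
+ *     struct dquot *dquot = dqget(sb, inode->i_uid, USRQUOTA);
+ *
+ *     if (dquot) {
+ *             spin_lock(&dq_data_lock);
+ *             ... inspect or update dquot->dq_dqb ...
+ *             spin_unlock(&dq_data_lock);
+ *             dqput(dquot);
+ *     }
+ */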
+
+static int dqinit_needed(struct inode *inode, int type)
+{
+       int cnt;
+
+       if (IS_NOQUOTA(inode))
+               return 0;
+       if (type != -1)
+               return !inode->i_dquot[type];
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (!inode->i_dquot[cnt])
+                       return 1;
+       return 0;
+}
+
+/* This routine is guarded by dqonoff_mutex */
+static void add_dquot_ref(struct super_block *sb, int type)
+{
+       struct inode *inode, *old_inode = NULL;
+
+       spin_lock(&inode_lock);
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+               if (!atomic_read(&inode->i_writecount))
+                       continue;
+               if (!dqinit_needed(inode, type))
+                       continue;
+               if (inode->i_state & (I_FREEING|I_WILL_FREE))
+                       continue;
+
+               __iget(inode);
+               spin_unlock(&inode_lock);
+
+               iput(old_inode);
+               sb->dq_op->initialize(inode, type);
+               /* We hold a reference to 'inode' so it couldn't have been
+                * removed from s_inodes list while we dropped the inode_lock.
+                * We cannot iput the inode now as we can be holding the last
+                * reference and we cannot iput it under inode_lock. So we
+                * keep the reference and iput it later. */
+               old_inode = inode;
+               spin_lock(&inode_lock);
+       }
+       spin_unlock(&inode_lock);
+       iput(old_inode);
+}
+
+/*
+ * Return 0 if dqput() won't block.
+ * (note that 1 doesn't necessarily mean blocking)
+ */
+static inline int dqput_blocks(struct dquot *dquot)
+{
+       if (atomic_read(&dquot->dq_count) <= 1)
+               return 1;
+       return 0;
+}
+
+/*
+ * Remove references to dquots from the inode and add the dquot to the list for
+ * freeing if we hold the last reference to the dquot
+ * We can't race with anybody because we hold dqptr_sem for writing...
+ */
+static int remove_inode_dquot_ref(struct inode *inode, int type,
+                                 struct list_head *tofree_head)
+{
+       struct dquot *dquot = inode->i_dquot[type];
+
+       inode->i_dquot[type] = NULL;
+       if (dquot) {
+               if (dqput_blocks(dquot)) {
+#ifdef __DQUOT_PARANOIA
+                       if (atomic_read(&dquot->dq_count) != 1)
+                               printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
+#endif
+                       spin_lock(&dq_list_lock);
+                       /* As the dquot currently has users it can't be on
+                        * the free list... */
+                       list_add(&dquot->dq_free, tofree_head);
+                       spin_unlock(&dq_list_lock);
+                       return 1;
+               }
+               else
+                       dqput(dquot);   /* We have guaranteed we won't block */
+       }
+       return 0;
+}
+
+/*
+ * Free a list of dquots
+ * Dquots have been removed from inodes and no new references can be taken, so
+ * we are the only ones holding a reference
+ */
+static void put_dquot_list(struct list_head *tofree_head)
+{
+       struct list_head *act_head;
+       struct dquot *dquot;
+
+       act_head = tofree_head->next;
+       while (act_head != tofree_head) {
+               dquot = list_entry(act_head, struct dquot, dq_free);
+               act_head = act_head->next;
+               /* Remove dquot from the list so we won't have problems... */
+               list_del_init(&dquot->dq_free);
+               dqput(dquot);
+       }
+}
+
+static void remove_dquot_ref(struct super_block *sb, int type,
+               struct list_head *tofree_head)
+{
+       struct inode *inode;
+
+       spin_lock(&inode_lock);
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+               if (!IS_NOQUOTA(inode))
+                       remove_inode_dquot_ref(inode, type, tofree_head);
+       }
+       spin_unlock(&inode_lock);
+}
+
+/* Gather all references from inodes and drop them */
+static void drop_dquot_ref(struct super_block *sb, int type)
+{
+       LIST_HEAD(tofree_head);
+
+       if (sb->dq_op) {
+               down_write(&sb_dqopt(sb)->dqptr_sem);
+               remove_dquot_ref(sb, type, &tofree_head);
+               up_write(&sb_dqopt(sb)->dqptr_sem);
+               put_dquot_list(&tofree_head);
+       }
+}
+
+static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
+{
+       dquot->dq_dqb.dqb_curinodes += number;
+}
+
+static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
+{
+       dquot->dq_dqb.dqb_curspace += number;
+}
+
+static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
+{
+       dquot->dq_dqb.dqb_rsvspace += number;
+}
+
+/*
+ * Claim reserved quota space
+ */
+static void dquot_claim_reserved_space(struct dquot *dquot,
+                                               qsize_t number)
+{
+       WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
+       dquot->dq_dqb.dqb_curspace += number;
+       dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
+static inline
+void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
+{
+       dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
+static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
+{
+       if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
+           dquot->dq_dqb.dqb_curinodes >= number)
+               dquot->dq_dqb.dqb_curinodes -= number;
+       else
+               dquot->dq_dqb.dqb_curinodes = 0;
+       if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
+               dquot->dq_dqb.dqb_itime = (time_t) 0;
+       clear_bit(DQ_INODES_B, &dquot->dq_flags);
+}
+
+static void dquot_decr_space(struct dquot *dquot, qsize_t number)
+{
+       if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
+           dquot->dq_dqb.dqb_curspace >= number)
+               dquot->dq_dqb.dqb_curspace -= number;
+       else
+               dquot->dq_dqb.dqb_curspace = 0;
+       if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
+               dquot->dq_dqb.dqb_btime = (time_t) 0;
+       clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+}
+
+static int warning_issued(struct dquot *dquot, const int warntype)
+{
+       int flag = (warntype == QUOTA_NL_BHARDWARN ||
+               warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
+               ((warntype == QUOTA_NL_IHARDWARN ||
+               warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
+
+       if (!flag)
+               return 0;
+       return test_and_set_bit(flag, &dquot->dq_flags);
+}
+
+#ifdef CONFIG_PRINT_QUOTA_WARNING
+static int flag_print_warnings = 1;
+
+static int need_print_warning(struct dquot *dquot)
+{
+       if (!flag_print_warnings)
+               return 0;
+
+       switch (dquot->dq_type) {
+               case USRQUOTA:
+                       return current_fsuid() == dquot->dq_id;
+               case GRPQUOTA:
+                       return in_group_p(dquot->dq_id);
+       }
+       return 0;
+}
+
+/* Print warning to user which exceeded quota */
+static void print_warning(struct dquot *dquot, const int warntype)
+{
+       char *msg = NULL;
+       struct tty_struct *tty;
+
+       if (warntype == QUOTA_NL_IHARDBELOW ||
+           warntype == QUOTA_NL_ISOFTBELOW ||
+           warntype == QUOTA_NL_BHARDBELOW ||
+           warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
+               return;
+
+       tty = get_current_tty();
+       if (!tty)
+               return;
+       tty_write_message(tty, dquot->dq_sb->s_id);
+       if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
+               tty_write_message(tty, ": warning, ");
+       else
+               tty_write_message(tty, ": write failed, ");
+       tty_write_message(tty, quotatypes[dquot->dq_type]);
+       switch (warntype) {
+               case QUOTA_NL_IHARDWARN:
+                       msg = " file limit reached.\r\n";
+                       break;
+               case QUOTA_NL_ISOFTLONGWARN:
+                       msg = " file quota exceeded too long.\r\n";
+                       break;
+               case QUOTA_NL_ISOFTWARN:
+                       msg = " file quota exceeded.\r\n";
+                       break;
+               case QUOTA_NL_BHARDWARN:
+                       msg = " block limit reached.\r\n";
+                       break;
+               case QUOTA_NL_BSOFTLONGWARN:
+                       msg = " block quota exceeded too long.\r\n";
+                       break;
+               case QUOTA_NL_BSOFTWARN:
+                       msg = " block quota exceeded.\r\n";
+                       break;
+       }
+       tty_write_message(tty, msg);
+       tty_kref_put(tty);
+}
+#endif
+
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+
+/* Netlink family structure for quota */
+static struct genl_family quota_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = 0,
+       .name = "VFS_DQUOT",
+       .version = 1,
+       .maxattr = QUOTA_NL_A_MAX,
+};
+
+/* Send a warning to userspace about the user who exceeded the quota */
+static void send_warning(const struct dquot *dquot, const char warntype)
+{
+       static atomic_t seq;
+       struct sk_buff *skb;
+       void *msg_head;
+       int ret;
+       int msg_size = 4 * nla_total_size(sizeof(u32)) +
+                      2 * nla_total_size(sizeof(u64));
+
+       /* We have to allocate using GFP_NOFS as we are called from a
+        * filesystem performing a write and thus further recursion into
+        * the fs to free some data could cause deadlocks. */
+       skb = genlmsg_new(msg_size, GFP_NOFS);
+       if (!skb) {
+               printk(KERN_ERR
+                 "VFS: Not enough memory to send quota warning.\n");
+               return;
+       }
+       msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
+                       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
+       if (!msg_head) {
+               printk(KERN_ERR
+                 "VFS: Cannot store netlink header in quota warning.\n");
+               goto err_out;
+       }
+       ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
+       if (ret)
+               goto attr_err_out;
+       ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
+       if (ret)
+               goto attr_err_out;
+       ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
+       if (ret)
+               goto attr_err_out;
+       ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
+               MAJOR(dquot->dq_sb->s_dev));
+       if (ret)
+               goto attr_err_out;
+       ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
+               MINOR(dquot->dq_sb->s_dev));
+       if (ret)
+               goto attr_err_out;
+       ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
+       if (ret)
+               goto attr_err_out;
+       genlmsg_end(skb, msg_head);
+
+       genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
+       return;
+attr_err_out:
+       printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
+err_out:
+       kfree_skb(skb);
+}
+#endif
+/*
+ * Write warnings to the console and send warning messages over netlink.
+ *
+ * Note that this function can sleep.
+ */
+static void flush_warnings(struct dquot *const *dquots, char *warntype)
+{
+       int i;
+
+       for (i = 0; i < MAXQUOTAS; i++)
+               if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
+                   !warning_issued(dquots[i], warntype[i])) {
+#ifdef CONFIG_PRINT_QUOTA_WARNING
+                       print_warning(dquots[i], warntype[i]);
+#endif
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+                       send_warning(dquots[i], warntype[i]);
+#endif
+               }
+}
+
+static int ignore_hardlimit(struct dquot *dquot)
+{
+       struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+
+       return capable(CAP_SYS_RESOURCE) &&
+              (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
+               !(info->dqi_flags & V1_DQF_RSQUASH));
+}
+
+/* needs dq_data_lock */
+static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
+{
+       qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
+
+       *warntype = QUOTA_NL_NOWARN;
+       if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
+           test_bit(DQ_FAKE_B, &dquot->dq_flags))
+               return QUOTA_OK;
+
+       if (dquot->dq_dqb.dqb_ihardlimit &&
+           newinodes > dquot->dq_dqb.dqb_ihardlimit &&
+            !ignore_hardlimit(dquot)) {
+               *warntype = QUOTA_NL_IHARDWARN;
+               return NO_QUOTA;
+       }
+
+       if (dquot->dq_dqb.dqb_isoftlimit &&
+           newinodes > dquot->dq_dqb.dqb_isoftlimit &&
+           dquot->dq_dqb.dqb_itime &&
+           get_seconds() >= dquot->dq_dqb.dqb_itime &&
+            !ignore_hardlimit(dquot)) {
+               *warntype = QUOTA_NL_ISOFTLONGWARN;
+               return NO_QUOTA;
+       }
+
+       if (dquot->dq_dqb.dqb_isoftlimit &&
+           newinodes > dquot->dq_dqb.dqb_isoftlimit &&
+           dquot->dq_dqb.dqb_itime == 0) {
+               *warntype = QUOTA_NL_ISOFTWARN;
+               dquot->dq_dqb.dqb_itime = get_seconds() +
+                   sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+       }
+
+       return QUOTA_OK;
+}
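+
+/*
+ * Worked example for check_idq(): with dqb_isoftlimit == 1000,
+ * dqb_ihardlimit == 1100 and dqb_curinodes == 990, allocating 20 inodes
+ * exceeds the soft limit, so dqb_itime is set to now + dqi_igrace, a
+ * QUOTA_NL_ISOFTWARN warning is issued and the allocation succeeds.
+ * Allocating 200 inodes would exceed the hard limit and fail with NO_QUOTA
+ * (QUOTA_NL_IHARDWARN). Once the grace time has expired, allocations above
+ * the soft limit are refused as well (QUOTA_NL_ISOFTLONGWARN).
+ */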
+
+/* needs dq_data_lock */
+static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
+{
+       qsize_t tspace;
+       struct super_block *sb = dquot->dq_sb;
+
+       *warntype = QUOTA_NL_NOWARN;
+       if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
+           test_bit(DQ_FAKE_B, &dquot->dq_flags))
+               return QUOTA_OK;
+
+       tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+               + space;
+
+       if (dquot->dq_dqb.dqb_bhardlimit &&
+           tspace > dquot->dq_dqb.dqb_bhardlimit &&
+            !ignore_hardlimit(dquot)) {
+               if (!prealloc)
+                       *warntype = QUOTA_NL_BHARDWARN;
+               return NO_QUOTA;
+       }
+
+       if (dquot->dq_dqb.dqb_bsoftlimit &&
+           tspace > dquot->dq_dqb.dqb_bsoftlimit &&
+           dquot->dq_dqb.dqb_btime &&
+           get_seconds() >= dquot->dq_dqb.dqb_btime &&
+            !ignore_hardlimit(dquot)) {
+               if (!prealloc)
+                       *warntype = QUOTA_NL_BSOFTLONGWARN;
+               return NO_QUOTA;
+       }
+
+       if (dquot->dq_dqb.dqb_bsoftlimit &&
+           tspace > dquot->dq_dqb.dqb_bsoftlimit &&
+           dquot->dq_dqb.dqb_btime == 0) {
+               if (!prealloc) {
+                       *warntype = QUOTA_NL_BSOFTWARN;
+                       dquot->dq_dqb.dqb_btime = get_seconds() +
+                           sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
+               }
+               else
+                       /*
+                        * We don't allow preallocation to exceed the softlimit,
+                        * so exceeding it will always be reported
+                        */
+                       return NO_QUOTA;
+       }
+
+       return QUOTA_OK;
+}
+
+static int info_idq_free(struct dquot *dquot, qsize_t inodes)
+{
+       qsize_t newinodes;
+
+       if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
+           dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
+           !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
+               return QUOTA_NL_NOWARN;
+
+       newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
+       if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
+               return QUOTA_NL_ISOFTBELOW;
+       if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
+           newinodes < dquot->dq_dqb.dqb_ihardlimit)
+               return QUOTA_NL_IHARDBELOW;
+       return QUOTA_NL_NOWARN;
+}
+
+static int info_bdq_free(struct dquot *dquot, qsize_t space)
+{
+       if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
+           dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
+               return QUOTA_NL_NOWARN;
+
+       if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
+               return QUOTA_NL_BSOFTBELOW;
+       if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
+           dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
+               return QUOTA_NL_BHARDBELOW;
+       return QUOTA_NL_NOWARN;
+}
+/*
+ *     Initialize quota pointers in inode
+ *     We do things in a slightly complicated way so that we avoid calling
+ *     dqget() - and thus filesystem callbacks - under dqptr_sem.
+ */
+int dquot_initialize(struct inode *inode, int type)
+{
+       unsigned int id = 0;
+       int cnt, ret = 0;
+       struct dquot *got[MAXQUOTAS] = { NULL, NULL };
+       struct super_block *sb = inode->i_sb;
+
+       /* First test before acquiring mutex - solves deadlocks when we
+         * re-enter the quota code and are already holding the mutex */
+       if (IS_NOQUOTA(inode))
+               return 0;
+
+       /* First get references to structures we might need. */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (type != -1 && cnt != type)
+                       continue;
+               switch (cnt) {
+               case USRQUOTA:
+                       id = inode->i_uid;
+                       break;
+               case GRPQUOTA:
+                       id = inode->i_gid;
+                       break;
+               }
+               got[cnt] = dqget(sb, id, cnt);
+       }
+
+       down_write(&sb_dqopt(sb)->dqptr_sem);
+       /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
+       if (IS_NOQUOTA(inode))
+               goto out_err;
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (type != -1 && cnt != type)
+                       continue;
+               /* Avoid races with quotaoff() */
+               if (!sb_has_quota_active(sb, cnt))
+                       continue;
+               if (!inode->i_dquot[cnt]) {
+                       inode->i_dquot[cnt] = got[cnt];
+                       got[cnt] = NULL;
+               }
+       }
+out_err:
+       up_write(&sb_dqopt(sb)->dqptr_sem);
+       /* Drop unused references */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               dqput(got[cnt]);
+       return ret;
+}
+EXPORT_SYMBOL(dquot_initialize);
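+
+/*
+ * Filesystems normally reach dquot_initialize() through vfs_dq_init() in
+ * quotaops.h, which calls ->initialize(inode, -1) when any quota is active
+ * and the inode is not a quota file itself. A sketch of the expected calling
+ * pattern when creating a new inode:
+ *
+ *     vfs_dq_init(inode);
+ *     if (vfs_dq_alloc_inode(inode))
+ *             ... fail with -EDQUOT ...
+ */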
+
+/*
+ *     Release all quotas referenced by inode
+ */
+int dquot_drop(struct inode *inode)
+{
+       int cnt;
+       struct dquot *put[MAXQUOTAS];
+
+       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               put[cnt] = inode->i_dquot[cnt];
+               inode->i_dquot[cnt] = NULL;
+       }
+       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               dqput(put[cnt]);
+       return 0;
+}
+EXPORT_SYMBOL(dquot_drop);
+
+/* Wrapper to remove references to quota structures from inode */
+void vfs_dq_drop(struct inode *inode)
+{
+       /* Here we can get an arbitrary inode from clear_inode() so we have
+        * to be careful. OTOH we don't need locking as quota operations
+        * are allowed to change only at mount time */
+       if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
+           && inode->i_sb->dq_op->drop) {
+               int cnt;
+               /* Test before calling to rule out calls from proc and such
+                * where we are not allowed to block. Note that this is
+                * actually a reliable test even without the lock - the caller
+                * must ensure that nobody can come after the DQUOT_DROP and
+                * add quota pointers back anyway */
+               for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+                       if (inode->i_dquot[cnt])
+                               break;
+               if (cnt < MAXQUOTAS)
+                       inode->i_sb->dq_op->drop(inode);
+       }
+}
+EXPORT_SYMBOL(vfs_dq_drop);
+
+/*
+ * The following four functions update the i_blocks+i_bytes fields and the
+ * quota information (together with appropriate checks).
+ * NOTE: We absolutely rely on the fact that the caller dirties
+ * the inode (usually the macros in quotaops.h take care of this) and
+ * holds a handle for the current transaction so that the dquot write and
+ * the inode write go into the same transaction.
+ */
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int __dquot_alloc_space(struct inode *inode, qsize_t number,
+                       int warn, int reserve)
+{
+       int cnt, ret = QUOTA_OK;
+       char warntype[MAXQUOTAS];
+
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               warntype[cnt] = QUOTA_NL_NOWARN;
+
+       spin_lock(&dq_data_lock);
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (!inode->i_dquot[cnt])
+                       continue;
+               if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
+                   == NO_QUOTA) {
+                       ret = NO_QUOTA;
+                       goto out_unlock;
+               }
+       }
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (!inode->i_dquot[cnt])
+                       continue;
+               if (reserve)
+                       dquot_resv_space(inode->i_dquot[cnt], number);
+               else
+                       dquot_incr_space(inode->i_dquot[cnt], number);
+       }
+       if (!reserve)
+               inode_add_bytes(inode, number);
+out_unlock:
+       spin_unlock(&dq_data_lock);
+       flush_warnings(inode->i_dquot, warntype);
+       return ret;
+}
+
+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+{
+       int cnt, ret = QUOTA_OK;
+
+       /*
+        * First test before acquiring mutex - solves deadlocks when we
+        * re-enter the quota code and are already holding the mutex
+        */
+       if (IS_NOQUOTA(inode)) {
+               inode_add_bytes(inode, number);
+               goto out;
+       }
+
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       if (IS_NOQUOTA(inode)) {
+               inode_add_bytes(inode, number);
+               goto out_unlock;
+       }
+
+       ret = __dquot_alloc_space(inode, number, warn, 0);
+       if (ret == NO_QUOTA)
+               goto out_unlock;
+
+       /* Dirtify all the dquots - this can block when journalling */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (inode->i_dquot[cnt])
+                       mark_dquot_dirty(inode->i_dquot[cnt]);
+out_unlock:
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+       return ret;
+}
+EXPORT_SYMBOL(dquot_alloc_space);
+
+int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
+{
+       int ret = QUOTA_OK;
+
+       if (IS_NOQUOTA(inode))
+               goto out;
+
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       if (IS_NOQUOTA(inode))
+               goto out_unlock;
+
+       ret = __dquot_alloc_space(inode, number, warn, 1);
+out_unlock:
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+       return ret;
+}
+EXPORT_SYMBOL(dquot_reserve_space);
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int dquot_alloc_inode(const struct inode *inode, qsize_t number)
+{
+       int cnt, ret = NO_QUOTA;
+       char warntype[MAXQUOTAS];
+
+       /* First test before acquiring mutex - solves deadlocks when we
+         * re-enter the quota code and are already holding the mutex */
+       if (IS_NOQUOTA(inode))
+               return QUOTA_OK;
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               warntype[cnt] = QUOTA_NL_NOWARN;
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       if (IS_NOQUOTA(inode)) {
+               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               return QUOTA_OK;
+       }
+       spin_lock(&dq_data_lock);
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (!inode->i_dquot[cnt])
+                       continue;
+               if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
+                   == NO_QUOTA)
+                       goto warn_put_all;
+       }
+
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (!inode->i_dquot[cnt])
+                       continue;
+               dquot_incr_inodes(inode->i_dquot[cnt], number);
+       }
+       ret = QUOTA_OK;
+warn_put_all:
+       spin_unlock(&dq_data_lock);
+       if (ret == QUOTA_OK)
+               /* Dirtify all the dquots - this can block when journalling */
+               for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+                       if (inode->i_dquot[cnt])
+                               mark_dquot_dirty(inode->i_dquot[cnt]);
+       flush_warnings(inode->i_dquot, warntype);
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       return ret;
+}
+EXPORT_SYMBOL(dquot_alloc_inode);
+
+int dquot_claim_space(struct inode *inode, qsize_t number)
+{
+       int cnt;
+       int ret = QUOTA_OK;
+
+       if (IS_NOQUOTA(inode)) {
+               inode_add_bytes(inode, number);
+               goto out;
+       }
+
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       if (IS_NOQUOTA(inode))  {
+               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               inode_add_bytes(inode, number);
+               goto out;
+       }
+
+       spin_lock(&dq_data_lock);
+       /* Claim reserved quota space as allocated space */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (inode->i_dquot[cnt])
+                       dquot_claim_reserved_space(inode->i_dquot[cnt],
+                                                       number);
+       }
+       /* Update inode bytes */
+       inode_add_bytes(inode, number);
+       spin_unlock(&dq_data_lock);
+       /* Dirtify all the dquots - this can block when journalling */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (inode->i_dquot[cnt])
+                       mark_dquot_dirty(inode->i_dquot[cnt]);
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+       return ret;
+}
+EXPORT_SYMBOL(dquot_claim_space);
+
+/*
+ * Release reserved quota space
+ */
+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+{
+       int cnt;
+
+       if (IS_NOQUOTA(inode))
+               goto out;
+
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       if (IS_NOQUOTA(inode))
+               goto out_unlock;
+
+       spin_lock(&dq_data_lock);
+       /* Release reserved dquots */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (inode->i_dquot[cnt])
+                       dquot_free_reserved_space(inode->i_dquot[cnt], number);
+       }
+       spin_unlock(&dq_data_lock);
+
+out_unlock:
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+       return;
+}
+EXPORT_SYMBOL(dquot_release_reserved_space);
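+
+/*
+ * dquot_reserve_space(), dquot_claim_space() and
+ * dquot_release_reserved_space() together implement the reservation cycle
+ * used for delayed allocation. A sketch of the expected calling pattern
+ * (warn has the same meaning as in dquot_alloc_space()):
+ *
+ *     dquot_reserve_space(inode, bytes, warn);
+ *             ... later, once blocks are really allocated ...
+ *     dquot_claim_space(inode, bytes);
+ *             ... or, if the reservation turns out not to be needed ...
+ *     dquot_release_reserved_space(inode, bytes);
+ */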
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int dquot_free_space(struct inode *inode, qsize_t number)
+{
+       unsigned int cnt;
+       char warntype[MAXQUOTAS];
+
+       /* First test before acquiring mutex - solves deadlocks when we
+         * re-enter the quota code and are already holding the mutex */
+       if (IS_NOQUOTA(inode)) {
+out_sub:
+               inode_sub_bytes(inode, number);
+               return QUOTA_OK;
+       }
+
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       /* Now recheck reliably when holding dqptr_sem */
+       if (IS_NOQUOTA(inode)) {
+               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               goto out_sub;
+       }
+       spin_lock(&dq_data_lock);
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (!inode->i_dquot[cnt])
+                       continue;
+               warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
+               dquot_decr_space(inode->i_dquot[cnt], number);
+       }
+       inode_sub_bytes(inode, number);
+       spin_unlock(&dq_data_lock);
+       /* Dirtify all the dquots - this can block when journalling */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (inode->i_dquot[cnt])
+                       mark_dquot_dirty(inode->i_dquot[cnt]);
+       flush_warnings(inode->i_dquot, warntype);
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       return QUOTA_OK;
+}
+EXPORT_SYMBOL(dquot_free_space);
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int dquot_free_inode(const struct inode *inode, qsize_t number)
+{
+       unsigned int cnt;
+       char warntype[MAXQUOTAS];
+
+       /* First test before acquiring mutex - solves deadlocks when we
+         * re-enter the quota code and are already holding the mutex */
+       if (IS_NOQUOTA(inode))
+               return QUOTA_OK;
+
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       /* Now recheck reliably when holding dqptr_sem */
+       if (IS_NOQUOTA(inode)) {
+               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               return QUOTA_OK;
+       }
+       spin_lock(&dq_data_lock);
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (!inode->i_dquot[cnt])
+                       continue;
+               warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
+               dquot_decr_inodes(inode->i_dquot[cnt], number);
+       }
+       spin_unlock(&dq_data_lock);
+       /* Dirtify all the dquots - this can block when journalling */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (inode->i_dquot[cnt])
+                       mark_dquot_dirty(inode->i_dquot[cnt]);
+       flush_warnings(inode->i_dquot, warntype);
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       return QUOTA_OK;
+}
+EXPORT_SYMBOL(dquot_free_inode);
+
+/*
+ * Callback function to get the reserved quota space from the underlying fs
+ */
+qsize_t dquot_get_reserved_space(struct inode *inode)
+{
+       qsize_t reserved_space = 0;
+
+       if (sb_any_quota_active(inode->i_sb) &&
+           inode->i_sb->dq_op->get_reserved_space)
+               reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+       return reserved_space;
+}
+
+/*
+ * Transfer the number of inodes and blocks from one diskquota to another.
+ *
+ * This operation can block, but only after everything is updated.
+ * A transaction must be started when entering this function.
+ */
+int dquot_transfer(struct inode *inode, struct iattr *iattr)
+{
+       qsize_t space, cur_space;
+       qsize_t rsv_space = 0;
+       struct dquot *transfer_from[MAXQUOTAS];
+       struct dquot *transfer_to[MAXQUOTAS];
+       int cnt, ret = QUOTA_OK;
+       int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
+           chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
+       char warntype_to[MAXQUOTAS];
+       char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
+
+       /* First test before acquiring mutex - solves deadlocks when we
+         * re-enter the quota code and are already holding the mutex */
+       if (IS_NOQUOTA(inode))
+               return QUOTA_OK;
+       /* Initialize the arrays */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               transfer_from[cnt] = NULL;
+               transfer_to[cnt] = NULL;
+               warntype_to[cnt] = QUOTA_NL_NOWARN;
+       }
+       if (chuid)
+               transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
+                                             USRQUOTA);
+       if (chgid)
+               transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
+                                             GRPQUOTA);
+
+       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       /* Now recheck reliably when holding dqptr_sem */
+       if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
+               up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               goto put_all;
+       }
+       spin_lock(&dq_data_lock);
+       cur_space = inode_get_bytes(inode);
+       rsv_space = dquot_get_reserved_space(inode);
+       space = cur_space + rsv_space;
+       /* Build the transfer_from list and check the limits */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (!transfer_to[cnt])
+                       continue;
+               transfer_from[cnt] = inode->i_dquot[cnt];
+               if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
+                   NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
+                   warntype_to + cnt) == NO_QUOTA)
+                       goto over_quota;
+       }
+
+       /*
+        * Finally perform the needed transfer from transfer_from to transfer_to
+        */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               /*
+                * Skip changes for the same uid or gid, or for a quota type
+                * that is turned off.
+                */
+               if (!transfer_to[cnt])
+                       continue;
+
+               /* Due to an IO error we might not have a transfer_from[] structure */
+               if (transfer_from[cnt]) {
+                       warntype_from_inodes[cnt] =
+                               info_idq_free(transfer_from[cnt], 1);
+                       warntype_from_space[cnt] =
+                               info_bdq_free(transfer_from[cnt], space);
+                       dquot_decr_inodes(transfer_from[cnt], 1);
+                       dquot_decr_space(transfer_from[cnt], cur_space);
+                       dquot_free_reserved_space(transfer_from[cnt],
+                                                 rsv_space);
+               }
+
+               dquot_incr_inodes(transfer_to[cnt], 1);
+               dquot_incr_space(transfer_to[cnt], cur_space);
+               dquot_resv_space(transfer_to[cnt], rsv_space);
+
+               inode->i_dquot[cnt] = transfer_to[cnt];
+       }
+       spin_unlock(&dq_data_lock);
+       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+       /* Dirtify all the dquots - this can block when journalling */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (transfer_from[cnt])
+                       mark_dquot_dirty(transfer_from[cnt]);
+               if (transfer_to[cnt]) {
+                       mark_dquot_dirty(transfer_to[cnt]);
+                       /* The reference we got is transferred to the inode */
+                       transfer_to[cnt] = NULL;
+               }
+       }
+warn_put_all:
+       flush_warnings(transfer_to, warntype_to);
+       flush_warnings(transfer_from, warntype_from_inodes);
+       flush_warnings(transfer_from, warntype_from_space);
+put_all:
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               dqput(transfer_from[cnt]);
+               dqput(transfer_to[cnt]);
+       }
+       return ret;
+over_quota:
+       spin_unlock(&dq_data_lock);
+       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       /* Clear dquot pointers we don't want to dqput() */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               transfer_from[cnt] = NULL;
+       ret = NO_QUOTA;
+       goto warn_put_all;
+}
+EXPORT_SYMBOL(dquot_transfer);
+
+/* Wrapper for transferring ownership of an inode */
+int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
+{
+       if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
+               vfs_dq_init(inode);
+               if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
+                       return 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(vfs_dq_transfer);
+
+/*
+ * Write info of quota file to disk
+ */
+int dquot_commit_info(struct super_block *sb, int type)
+{
+       int ret;
+       struct quota_info *dqopt = sb_dqopt(sb);
+
+       mutex_lock(&dqopt->dqio_mutex);
+       ret = dqopt->ops[type]->write_file_info(sb, type);
+       mutex_unlock(&dqopt->dqio_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(dquot_commit_info);
+
+/*
+ * Definitions of diskquota operations.
+ */
+struct dquot_operations dquot_operations = {
+       .initialize     = dquot_initialize,
+       .drop           = dquot_drop,
+       .alloc_space    = dquot_alloc_space,
+       .alloc_inode    = dquot_alloc_inode,
+       .free_space     = dquot_free_space,
+       .free_inode     = dquot_free_inode,
+       .transfer       = dquot_transfer,
+       .write_dquot    = dquot_commit,
+       .acquire_dquot  = dquot_acquire,
+       .release_dquot  = dquot_release,
+       .mark_dirty     = dquot_mark_dquot_dirty,
+       .write_info     = dquot_commit_info,
+       .alloc_dquot    = dquot_alloc,
+       .destroy_dquot  = dquot_destroy,
+};
+
+/*
+ * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
+ */
+int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
+{
+       int cnt, ret = 0;
+       struct quota_info *dqopt = sb_dqopt(sb);
+       struct inode *toputinode[MAXQUOTAS];
+
+       /* We cannot turn off usage accounting without turning off limits, nor
+        * suspend quotas while simultaneously turning them off. */
+       if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
+           || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
+           DQUOT_USAGE_ENABLED)))
+               return -EINVAL;
+
+       /* We need to serialize quota_off() for device */
+       mutex_lock(&dqopt->dqonoff_mutex);
+
+       /*
+        * Skip everything if there's nothing to do. We have to do this because
+        * sometimes we are called when fill_super() failed and calling
+        * sync_fs() in such cases does no good.
+        */
+       if (!sb_any_quota_loaded(sb)) {
+               mutex_unlock(&dqopt->dqonoff_mutex);
+               return 0;
+       }
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               toputinode[cnt] = NULL;
+               if (type != -1 && cnt != type)
+                       continue;
+               if (!sb_has_quota_loaded(sb, cnt))
+                       continue;
+
+               if (flags & DQUOT_SUSPENDED) {
+                       spin_lock(&dq_state_lock);
+                       dqopt->flags |=
+                               dquot_state_flag(DQUOT_SUSPENDED, cnt);
+                       spin_unlock(&dq_state_lock);
+               } else {
+                       spin_lock(&dq_state_lock);
+                       dqopt->flags &= ~dquot_state_flag(flags, cnt);
+                       /* Turning off suspended quotas? */
+                       if (!sb_has_quota_loaded(sb, cnt) &&
+                           sb_has_quota_suspended(sb, cnt)) {
+                               dqopt->flags &= ~dquot_state_flag(
+                                                       DQUOT_SUSPENDED, cnt);
+                               spin_unlock(&dq_state_lock);
+                               iput(dqopt->files[cnt]);
+                               dqopt->files[cnt] = NULL;
+                               continue;
+                       }
+                       spin_unlock(&dq_state_lock);
+               }
+
+               /* Do we still have to keep quota loaded? */
+               if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
+                       continue;
+
+               /* Note: these are blocking operations */
+               drop_dquot_ref(sb, cnt);
+               invalidate_dquots(sb, cnt);
+               /*
+                * Now all dquots should be invalidated and all writes done, so
+                * we should be the only users of the info. No locks needed.
+                */
+               if (info_dirty(&dqopt->info[cnt]))
+                       sb->dq_op->write_info(sb, cnt);
+               if (dqopt->ops[cnt]->free_file_info)
+                       dqopt->ops[cnt]->free_file_info(sb, cnt);
+               put_quota_format(dqopt->info[cnt].dqi_format);
+
+               toputinode[cnt] = dqopt->files[cnt];
+               if (!sb_has_quota_loaded(sb, cnt))
+                       dqopt->files[cnt] = NULL;
+               dqopt->info[cnt].dqi_flags = 0;
+               dqopt->info[cnt].dqi_igrace = 0;
+               dqopt->info[cnt].dqi_bgrace = 0;
+               dqopt->ops[cnt] = NULL;
+       }
+       mutex_unlock(&dqopt->dqonoff_mutex);
+
+       /* Skip syncing and setting flags if quota files are hidden */
+       if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
+               goto put_inodes;
+
+       /* Sync the superblock so that buffers with quota data are written to
+        * disk (and so userspace sees correct data afterwards). */
+       if (sb->s_op->sync_fs)
+               sb->s_op->sync_fs(sb, 1);
+       sync_blockdev(sb->s_bdev);
+       /* Now the quota files are just ordinary files and we can set the
+        * inode flags back. Moreover we discard the pagecache so that
+        * userspace sees the writes we did bypassing the pagecache. We
+        * must also discard the blockdev buffers so that we see the
+        * changes done by userspace on the next quotaon() */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (toputinode[cnt]) {
+                       mutex_lock(&dqopt->dqonoff_mutex);
+                       /* If quota was reenabled in the meantime, we have
+                        * nothing to do */
+                       if (!sb_has_quota_loaded(sb, cnt)) {
+                               mutex_lock_nested(&toputinode[cnt]->i_mutex,
+                                                 I_MUTEX_QUOTA);
+                               toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
+                                 S_NOATIME | S_NOQUOTA);
+                               truncate_inode_pages(&toputinode[cnt]->i_data,
+                                                    0);
+                               mutex_unlock(&toputinode[cnt]->i_mutex);
+                               mark_inode_dirty(toputinode[cnt]);
+                       }
+                       mutex_unlock(&dqopt->dqonoff_mutex);
+               }
+       if (sb->s_bdev)
+               invalidate_bdev(sb->s_bdev);
+put_inodes:
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (toputinode[cnt]) {
+                       /* On remount RO, we keep the inode pointer so that we
+                        * can reenable quota on the subsequent remount RW. We
+                        * have to check the 'flags' variable and not use the
+                        * sb_has_* functions because another quotaon / quotaoff
+                        * could change the global state before we got here. We
+                        * refuse to suspend quotas when there is a pending
+                        * delete on the quota file... */
+                       if (!(flags & DQUOT_SUSPENDED))
+                               iput(toputinode[cnt]);
+                       else if (!toputinode[cnt]->i_nlink)
+                               ret = -EBUSY;
+               }
+       return ret;
+}
+EXPORT_SYMBOL(vfs_quota_disable);
+
+int vfs_quota_off(struct super_block *sb, int type, int remount)
+{
+       return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
+                                (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
+}
+EXPORT_SYMBOL(vfs_quota_off);
+
+/*
+ *     Turn quotas on for a device
+ */
+
+/*
+ * Helper function to turn quotas on when we already have the inode of
+ * quota file and no quota information is loaded.
+ */
+static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+       unsigned int flags)
+{
+       struct quota_format_type *fmt = find_quota_format(format_id);
+       struct super_block *sb = inode->i_sb;
+       struct quota_info *dqopt = sb_dqopt(sb);
+       int error;
+       int oldflags = -1;
+
+       if (!fmt)
+               return -ESRCH;
+       if (!S_ISREG(inode->i_mode)) {
+               error = -EACCES;
+               goto out_fmt;
+       }
+       if (IS_RDONLY(inode)) {
+               error = -EROFS;
+               goto out_fmt;
+       }
+       if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
+               error = -EINVAL;
+               goto out_fmt;
+       }
+       /* Usage always has to be set... */
+       if (!(flags & DQUOT_USAGE_ENABLED)) {
+               error = -EINVAL;
+               goto out_fmt;
+       }
+
+       if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+               /* As we bypass the pagecache we must now flush the inode so
+                * that we see all the changes from userspace... */
+               write_inode_now(inode, 1);
+               /* And now flush the block cache so that the kernel sees the
+                * changes */
+               invalidate_bdev(sb->s_bdev);
+       }
+       mutex_lock(&inode->i_mutex);
+       mutex_lock(&dqopt->dqonoff_mutex);
+       if (sb_has_quota_loaded(sb, type)) {
+               error = -EBUSY;
+               goto out_lock;
+       }
+
+       if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+               /* We don't want quota and atime on quota files (deadlocks are
+                * possible). Also nobody should write to the file - we use
+                * special IO operations which ignore the immutable bit. */
+               down_write(&dqopt->dqptr_sem);
+               oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
+                                            S_NOQUOTA);
+               inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
+               up_write(&dqopt->dqptr_sem);
+               sb->dq_op->drop(inode);
+       }
+
+       error = -EIO;
+       dqopt->files[type] = igrab(inode);
+       if (!dqopt->files[type])
+               goto out_lock;
+       error = -EINVAL;
+       if (!fmt->qf_ops->check_quota_file(sb, type))
+               goto out_file_init;
+
+       dqopt->ops[type] = fmt->qf_ops;
+       dqopt->info[type].dqi_format = fmt;
+       dqopt->info[type].dqi_fmt_id = format_id;
+       INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
+       mutex_lock(&dqopt->dqio_mutex);
+       error = dqopt->ops[type]->read_file_info(sb, type);
+       if (error < 0) {
+               mutex_unlock(&dqopt->dqio_mutex);
+               goto out_file_init;
+       }
+       mutex_unlock(&dqopt->dqio_mutex);
+       mutex_unlock(&inode->i_mutex);
+       spin_lock(&dq_state_lock);
+       dqopt->flags |= dquot_state_flag(flags, type);
+       spin_unlock(&dq_state_lock);
+
+       add_dquot_ref(sb, type);
+       mutex_unlock(&dqopt->dqonoff_mutex);
+
+       return 0;
+
+out_file_init:
+       dqopt->files[type] = NULL;
+       iput(inode);
+out_lock:
+       mutex_unlock(&dqopt->dqonoff_mutex);
+       if (oldflags != -1) {
+               down_write(&dqopt->dqptr_sem);
+               /* Set the flags back (in the case of accidental quotaon()
+                * on the wrong file we don't want to mess up the flags) */
+               inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
+               inode->i_flags |= oldflags;
+               up_write(&dqopt->dqptr_sem);
+       }
+       mutex_unlock(&inode->i_mutex);
+out_fmt:
+       put_quota_format(fmt);
+
+       return error;
+}
+
+/* Reenable quotas on remount RW */
+static int vfs_quota_on_remount(struct super_block *sb, int type)
+{
+       struct quota_info *dqopt = sb_dqopt(sb);
+       struct inode *inode;
+       int ret;
+       unsigned int flags;
+
+       mutex_lock(&dqopt->dqonoff_mutex);
+       if (!sb_has_quota_suspended(sb, type)) {
+               mutex_unlock(&dqopt->dqonoff_mutex);
+               return 0;
+       }
+       inode = dqopt->files[type];
+       dqopt->files[type] = NULL;
+       spin_lock(&dq_state_lock);
+       flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
+                                               DQUOT_LIMITS_ENABLED, type);
+       dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
+       spin_unlock(&dq_state_lock);
+       mutex_unlock(&dqopt->dqonoff_mutex);
+
+       flags = dquot_generic_flag(flags, type);
+       ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id,
+                                  flags);
+       iput(inode);
+
+       return ret;
+}
+
+int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
+                     struct path *path)
+{
+       int error = security_quota_on(path->dentry);
+       if (error)
+               return error;
+       /* Quota file not on the same filesystem? */
+       if (path->mnt->mnt_sb != sb)
+               error = -EXDEV;
+       else
+               error = vfs_load_quota_inode(path->dentry->d_inode, type,
+                                            format_id, DQUOT_USAGE_ENABLED |
+                                            DQUOT_LIMITS_ENABLED);
+       return error;
+}
+EXPORT_SYMBOL(vfs_quota_on_path);
+
+int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
+                int remount)
+{
+       struct path path;
+       int error;
+
+       if (remount)
+               return vfs_quota_on_remount(sb, type);
+
+       error = kern_path(name, LOOKUP_FOLLOW, &path);
+       if (!error) {
+               error = vfs_quota_on_path(sb, type, format_id, &path);
+               path_put(&path);
+       }
+       return error;
+}
+EXPORT_SYMBOL(vfs_quota_on);
+
+/*
+ * More powerful function for turning on quotas, allowing the setting
+ * of individual quota flags
+ */
+int vfs_quota_enable(struct inode *inode, int type, int format_id,
+               unsigned int flags)
+{
+       int ret = 0;
+       struct super_block *sb = inode->i_sb;
+       struct quota_info *dqopt = sb_dqopt(sb);
+
+       /* Just unsuspend quotas? */
+       if (flags & DQUOT_SUSPENDED)
+               return vfs_quota_on_remount(sb, type);
+       if (!flags)
+               return 0;
+       /* Just updating flags needed? */
+       if (sb_has_quota_loaded(sb, type)) {
+               mutex_lock(&dqopt->dqonoff_mutex);
+               /* Now do a reliable test... */
+               if (!sb_has_quota_loaded(sb, type)) {
+                       mutex_unlock(&dqopt->dqonoff_mutex);
+                       goto load_quota;
+               }
+               if (flags & DQUOT_USAGE_ENABLED &&
+                   sb_has_quota_usage_enabled(sb, type)) {
+                       ret = -EBUSY;
+                       goto out_lock;
+               }
+               if (flags & DQUOT_LIMITS_ENABLED &&
+                   sb_has_quota_limits_enabled(sb, type)) {
+                       ret = -EBUSY;
+                       goto out_lock;
+               }
+               spin_lock(&dq_state_lock);
+               sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
+               spin_unlock(&dq_state_lock);
+out_lock:
+               mutex_unlock(&dqopt->dqonoff_mutex);
+               return ret;
+       }
+
+load_quota:
+       return vfs_load_quota_inode(inode, type, format_id, flags);
+}
+EXPORT_SYMBOL(vfs_quota_enable);
+
+/*
+ * This function is used when filesystem needs to initialize quotas
+ * during mount time.
+ */
+int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
+               int format_id, int type)
+{
+       struct dentry *dentry;
+       int error;
+
+       dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
+       if (IS_ERR(dentry))
+               return PTR_ERR(dentry);
+
+       if (!dentry->d_inode) {
+               error = -ENOENT;
+               goto out;
+       }
+
+       error = security_quota_on(dentry);
+       if (!error)
+               error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
+                               DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
+
+out:
+       dput(dentry);
+       return error;
+}
+EXPORT_SYMBOL(vfs_quota_on_mount);
+
+/* Wrapper to turn on quotas when remounting rw */
+int vfs_dq_quota_on_remount(struct super_block *sb)
+{
+       int cnt;
+       int ret = 0, err;
+
+       if (!sb->s_qcop || !sb->s_qcop->quota_on)
+               return -ENOSYS;
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
+               if (err < 0 && !ret)
+                       ret = err;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(vfs_dq_quota_on_remount);
+
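+/*
+ * Helpers converting between bytes and quota blocks: the quotactl interface
+ * expresses block limits in units of QIF_DQBLKSIZE bytes (1 <<
+ * QIF_DQBLKSIZE_BITS); stoqb() rounds a byte count up to whole quota blocks.
+ */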
+static inline qsize_t qbtos(qsize_t blocks)
+{
+       return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+       return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+/* Generic routine for getting common part of quota structure */
+static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
+{
+       struct mem_dqblk *dm = &dquot->dq_dqb;
+
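+       /* Limits are reported in quota blocks (see stoqb() above) while the
+        * current usage is reported in bytes and includes reserved space. */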
+       spin_lock(&dq_data_lock);
+       di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
+       di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
+       di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
+       di->dqb_ihardlimit = dm->dqb_ihardlimit;
+       di->dqb_isoftlimit = dm->dqb_isoftlimit;
+       di->dqb_curinodes = dm->dqb_curinodes;
+       di->dqb_btime = dm->dqb_btime;
+       di->dqb_itime = dm->dqb_itime;
+       di->dqb_valid = QIF_ALL;
+       spin_unlock(&dq_data_lock);
+}
+
+int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
+                 struct if_dqblk *di)
+{
+       struct dquot *dquot;
+
+       dquot = dqget(sb, id, type);
+       if (!dquot)
+               return -ESRCH;
+       do_get_dqblk(dquot, di);
+       dqput(dquot);
+
+       return 0;
+}
+EXPORT_SYMBOL(vfs_get_dqblk);
+
+/* Generic routine for setting common part of quota structure */
+static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
+{
+       struct mem_dqblk *dm = &dquot->dq_dqb;
+       int check_blim = 0, check_ilim = 0;
+       struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+
+       if ((di->dqb_valid & QIF_BLIMITS &&
+            (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
+             di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
+           (di->dqb_valid & QIF_ILIMITS &&
+            (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
+             di->dqb_isoftlimit > dqi->dqi_maxilimit)))
+               return -ERANGE;
+
+       spin_lock(&dq_data_lock);
+       if (di->dqb_valid & QIF_SPACE) {
+               dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
+               check_blim = 1;
+               __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
+       }
+       if (di->dqb_valid & QIF_BLIMITS) {
+               dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
+               dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
+               check_blim = 1;
+               __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
+       }
+       if (di->dqb_valid & QIF_INODES) {
+               dm->dqb_curinodes = di->dqb_curinodes;
+               check_ilim = 1;
+               __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
+       }
+       if (di->dqb_valid & QIF_ILIMITS) {
+               dm->dqb_isoftlimit = di->dqb_isoftlimit;
+               dm->dqb_ihardlimit = di->dqb_ihardlimit;
+               check_ilim = 1;
+               __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
+       }
+       if (di->dqb_valid & QIF_BTIME) {
+               dm->dqb_btime = di->dqb_btime;
+               check_blim = 1;
+               __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
+       }
+       if (di->dqb_valid & QIF_ITIME) {
+               dm->dqb_itime = di->dqb_itime;
+               check_ilim = 1;
+               __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
+       }
+
+       if (check_blim) {
+               if (!dm->dqb_bsoftlimit ||
+                   dm->dqb_curspace < dm->dqb_bsoftlimit) {
+                       dm->dqb_btime = 0;
+                       clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+               } else if (!(di->dqb_valid & QIF_BTIME))
+                       /* Set grace only if user hasn't provided his own... */
+                       dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
+       }
+       if (check_ilim) {
+               if (!dm->dqb_isoftlimit ||
+                   dm->dqb_curinodes < dm->dqb_isoftlimit) {
+                       dm->dqb_itime = 0;
+                       clear_bit(DQ_INODES_B, &dquot->dq_flags);
+               } else if (!(di->dqb_valid & QIF_ITIME))
+                       /* Set grace only if user hasn't provided his own... */
+                       dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
+       }
+       if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
+           dm->dqb_isoftlimit)
+               clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+       else
+               set_bit(DQ_FAKE_B, &dquot->dq_flags);
+       spin_unlock(&dq_data_lock);
+       mark_dquot_dirty(dquot);
+
+       return 0;
+}
+
+int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
+                 struct if_dqblk *di)
+{
+       struct dquot *dquot;
+       int rc;
+
+       dquot = dqget(sb, id, type);
+       if (!dquot) {
+               rc = -ESRCH;
+               goto out;
+       }
+       rc = do_set_dqblk(dquot, di);
+       dqput(dquot);
+out:
+       return rc;
+}
+EXPORT_SYMBOL(vfs_set_dqblk);
+
+/* Generic routine for getting common part of quota file information */
+int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
+{
+       struct mem_dqinfo *mi;
+
+       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+       if (!sb_has_quota_active(sb, type)) {
+               mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+               return -ESRCH;
+       }
+       mi = sb_dqopt(sb)->info + type;
+       spin_lock(&dq_data_lock);
+       ii->dqi_bgrace = mi->dqi_bgrace;
+       ii->dqi_igrace = mi->dqi_igrace;
+       ii->dqi_flags = mi->dqi_flags & DQF_MASK;
+       ii->dqi_valid = IIF_ALL;
+       spin_unlock(&dq_data_lock);
+       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+       return 0;
+}
+EXPORT_SYMBOL(vfs_get_dqinfo);
+
+/* Generic routine for setting common part of quota file information */
+int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
+{
+       struct mem_dqinfo *mi;
+       int err = 0;
+
+       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+       if (!sb_has_quota_active(sb, type)) {
+               err = -ESRCH;
+               goto out;
+       }
+       mi = sb_dqopt(sb)->info + type;
+       spin_lock(&dq_data_lock);
+       if (ii->dqi_valid & IIF_BGRACE)
+               mi->dqi_bgrace = ii->dqi_bgrace;
+       if (ii->dqi_valid & IIF_IGRACE)
+               mi->dqi_igrace = ii->dqi_igrace;
+       if (ii->dqi_valid & IIF_FLAGS)
+               mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
+                               (ii->dqi_flags & DQF_MASK);
+       spin_unlock(&dq_data_lock);
+       mark_info_dirty(sb, type);
+       /* Force write to disk */
+       sb->dq_op->write_info(sb, type);
+out:
+       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+       return err;
+}
+EXPORT_SYMBOL(vfs_set_dqinfo);
+
+struct quotactl_ops vfs_quotactl_ops = {
+       .quota_on       = vfs_quota_on,
+       .quota_off      = vfs_quota_off,
+       .quota_sync     = vfs_quota_sync,
+       .get_info       = vfs_get_dqinfo,
+       .set_info       = vfs_set_dqinfo,
+       .get_dqblk      = vfs_get_dqblk,
+       .set_dqblk      = vfs_set_dqblk
+};
+
+static ctl_table fs_dqstats_table[] = {
+       {
+               .ctl_name       = FS_DQ_LOOKUPS,
+               .procname       = "lookups",
+               .data           = &dqstats.lookups,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = FS_DQ_DROPS,
+               .procname       = "drops",
+               .data           = &dqstats.drops,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = FS_DQ_READS,
+               .procname       = "reads",
+               .data           = &dqstats.reads,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = FS_DQ_WRITES,
+               .procname       = "writes",
+               .data           = &dqstats.writes,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = FS_DQ_CACHE_HITS,
+               .procname       = "cache_hits",
+               .data           = &dqstats.cache_hits,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = FS_DQ_ALLOCATED,
+               .procname       = "allocated_dquots",
+               .data           = &dqstats.allocated_dquots,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = FS_DQ_FREE,
+               .procname       = "free_dquots",
+               .data           = &dqstats.free_dquots,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = FS_DQ_SYNCS,
+               .procname       = "syncs",
+               .data           = &dqstats.syncs,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = &proc_dointvec,
+       },
+#ifdef CONFIG_PRINT_QUOTA_WARNING
+       {
+               .ctl_name       = FS_DQ_WARNINGS,
+               .procname       = "warnings",
+               .data           = &flag_print_warnings,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+#endif
+       { .ctl_name = 0 },
+};
+
+static ctl_table fs_table[] = {
+       {
+               .ctl_name       = FS_DQSTATS,
+               .procname       = "quota",
+               .mode           = 0555,
+               .child          = fs_dqstats_table,
+       },
+       { .ctl_name = 0 },
+};
+
+static ctl_table sys_table[] = {
+       {
+               .ctl_name       = CTL_FS,
+               .procname       = "fs",
+               .mode           = 0555,
+               .child          = fs_table,
+       },
+       { .ctl_name = 0 },
+};
+
+static int __init dquot_init(void)
+{
+       int i;
+       unsigned long nr_hash, order;
+
+       printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
+
+       register_sysctl_table(sys_table);
+
+       dquot_cachep = kmem_cache_create("dquot",
+                       sizeof(struct dquot), sizeof(unsigned long) * 4,
+                       (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
+                               SLAB_MEM_SPREAD|SLAB_PANIC),
+                       NULL);
+
+       order = 0;
+       dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
+       if (!dquot_hash)
+               panic("Cannot create dquot hash table");
+
+       /* Find a power-of-two number of hlist_heads which fits into the
+        * allocation */
+       nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
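+       /* The loop below leaves dq_hash_bits == floor(log2(nr_hash)), so the
+        * table actually used is the largest power of two that fits. */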
+       dq_hash_bits = 0;
+       do {
+               dq_hash_bits++;
+       } while (nr_hash >> dq_hash_bits);
+       dq_hash_bits--;
+
+       nr_hash = 1UL << dq_hash_bits;
+       dq_hash_mask = nr_hash - 1;
+       for (i = 0; i < nr_hash; i++)
+               INIT_HLIST_HEAD(dquot_hash + i);
+
+       printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
+                       nr_hash, order, (PAGE_SIZE << order));
+
+       register_shrinker(&dqcache_shrinker);
+
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+       if (genl_register_family(&quota_genl_family) != 0)
+               printk(KERN_ERR
+                      "VFS: Failed to create quota netlink interface.\n");
+#endif
+
+       return 0;
+}
+module_init(dquot_init);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
new file mode 100644 (file)
index 0000000..b7f5a46
--- /dev/null
@@ -0,0 +1,524 @@
+/*
+ * Quota code necessary even when VFS quota support is not compiled
+ * into the kernel.  The interesting stuff is over in dquot.c; here
+ * we have symbols for initial quotactl(2) handling, the sysctl(2)
+ * variables, etc. - things needed even when quota support is disabled.
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <asm/current.h>
+#include <asm/uaccess.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/buffer_head.h>
+#include <linux/capability.h>
+#include <linux/quotaops.h>
+#include <linux/types.h>
+
+/* Check validity of generic quotactl commands */
+static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
+                                 qid_t id)
+{
+       if (type >= MAXQUOTAS)
+               return -EINVAL;
+       if (!sb && cmd != Q_SYNC)
+               return -ENODEV;
+       /* Is operation supported? */
+       if (sb && !sb->s_qcop)
+               return -ENOSYS;
+
+       switch (cmd) {
+               case Q_GETFMT:
+                       break;
+               case Q_QUOTAON:
+                       if (!sb->s_qcop->quota_on)
+                               return -ENOSYS;
+                       break;
+               case Q_QUOTAOFF:
+                       if (!sb->s_qcop->quota_off)
+                               return -ENOSYS;
+                       break;
+               case Q_SETINFO:
+                       if (!sb->s_qcop->set_info)
+                               return -ENOSYS;
+                       break;
+               case Q_GETINFO:
+                       if (!sb->s_qcop->get_info)
+                               return -ENOSYS;
+                       break;
+               case Q_SETQUOTA:
+                       if (!sb->s_qcop->set_dqblk)
+                               return -ENOSYS;
+                       break;
+               case Q_GETQUOTA:
+                       if (!sb->s_qcop->get_dqblk)
+                               return -ENOSYS;
+                       break;
+               case Q_SYNC:
+                       if (sb && !sb->s_qcop->quota_sync)
+                               return -ENOSYS;
+                       break;
+               default:
+                       return -EINVAL;
+       }
+
+       /* Is quota turned on for commands which need it? */
+       switch (cmd) {
+               case Q_GETFMT:
+               case Q_GETINFO:
+               case Q_SETINFO:
+               case Q_SETQUOTA:
+               case Q_GETQUOTA:
+                       /* This is just an informative test, so we are
+                        * satisfied without taking the lock */
+                       if (!sb_has_quota_active(sb, type))
+                               return -ESRCH;
+       }
+
+       /* Check privileges */
+       if (cmd == Q_GETQUOTA) {
+               if (((type == USRQUOTA && current_euid() != id) ||
+                    (type == GRPQUOTA && !in_egroup_p(id))) &&
+                   !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+       } else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+       return 0;
+}
+
+/* Check validity of XFS Quota Manager commands */
+static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
+                             qid_t id)
+{
+       if (type >= XQM_MAXQUOTAS)
+               return -EINVAL;
+       if (!sb)
+               return -ENODEV;
+       if (!sb->s_qcop)
+               return -ENOSYS;
+
+       switch (cmd) {
+               case Q_XQUOTAON:
+               case Q_XQUOTAOFF:
+               case Q_XQUOTARM:
+                       if (!sb->s_qcop->set_xstate)
+                               return -ENOSYS;
+                       break;
+               case Q_XGETQSTAT:
+                       if (!sb->s_qcop->get_xstate)
+                               return -ENOSYS;
+                       break;
+               case Q_XSETQLIM:
+                       if (!sb->s_qcop->set_xquota)
+                               return -ENOSYS;
+                       break;
+               case Q_XGETQUOTA:
+                       if (!sb->s_qcop->get_xquota)
+                               return -ENOSYS;
+                       break;
+               case Q_XQUOTASYNC:
+                       if (!sb->s_qcop->quota_sync)
+                               return -ENOSYS;
+                       break;
+               default:
+                       return -EINVAL;
+       }
+
+       /* Check privileges */
+       if (cmd == Q_XGETQUOTA) {
+               if (((type == XQM_USRQUOTA && current_euid() != id) ||
+                    (type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
+                    !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+       } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+       }
+
+       return 0;
+}
+
+static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
+                               qid_t id)
+{
+       int error;
+
+       if (XQM_COMMAND(cmd))
+               error = xqm_quotactl_valid(sb, type, cmd, id);
+       else
+               error = generic_quotactl_valid(sb, type, cmd, id);
+       if (!error)
+               error = security_quotactl(cmd, type, id, sb);
+       return error;
+}
+
+static void quota_sync_sb(struct super_block *sb, int type)
+{
+       int cnt;
+
+       sb->s_qcop->quota_sync(sb, type);
+
+       if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE)
+               return;
+       /* This is not very clever (or fast), but currently I don't know of
+        * any other simple way of getting quota data to disk, and we must get
+        * it there for it to be visible to userspace... */
+       if (sb->s_op->sync_fs)
+               sb->s_op->sync_fs(sb, 1);
+       sync_blockdev(sb->s_bdev);
+
+       /*
+        * Now when everything is written we can discard the pagecache so
+        * that userspace sees the changes.
+        */
+       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (type != -1 && cnt != type)
+                       continue;
+               if (!sb_has_quota_active(sb, cnt))
+                       continue;
+               mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
+                                 I_MUTEX_QUOTA);
+               truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
+               mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
+       }
+       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+}
+
+void sync_dquots(struct super_block *sb, int type)
+{
+       int cnt;
+
+       if (sb) {
+               if (sb->s_qcop->quota_sync)
+                       quota_sync_sb(sb, type);
+               return;
+       }
+
+       spin_lock(&sb_lock);
+restart:
+       list_for_each_entry(sb, &super_blocks, s_list) {
+               /* This test just improves performance so it needn't be
+                * reliable... */
+               for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+                       if (type != -1 && type != cnt)
+                               continue;
+                       if (!sb_has_quota_active(sb, cnt))
+                               continue;
+                       if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
+                          list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
+                               continue;
+                       break;
+               }
+               if (cnt == MAXQUOTAS)
+                       continue;
+               sb->s_count++;
+               spin_unlock(&sb_lock);
+               down_read(&sb->s_umount);
+               if (sb->s_root && sb->s_qcop->quota_sync)
+                       quota_sync_sb(sb, type);
+               up_read(&sb->s_umount);
+               spin_lock(&sb_lock);
+               if (__put_super_and_need_restart(sb))
+                       goto restart;
+       }
+       spin_unlock(&sb_lock);
+}
+
+/* Copy parameters and call proper function */
+static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
+                      void __user *addr)
+{
+       int ret;
+
+       switch (cmd) {
+               case Q_QUOTAON: {
+                       char *pathname;
+
+                       pathname = getname(addr);
+                       if (IS_ERR(pathname))
+                               return PTR_ERR(pathname);
+                       ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
+                       putname(pathname);
+                       return ret;
+               }
+               case Q_QUOTAOFF:
+                       return sb->s_qcop->quota_off(sb, type, 0);
+
+               case Q_GETFMT: {
+                       __u32 fmt;
+
+                       down_read(&sb_dqopt(sb)->dqptr_sem);
+                       if (!sb_has_quota_active(sb, type)) {
+                               up_read(&sb_dqopt(sb)->dqptr_sem);
+                               return -ESRCH;
+                       }
+                       fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
+                       up_read(&sb_dqopt(sb)->dqptr_sem);
+                       if (copy_to_user(addr, &fmt, sizeof(fmt)))
+                               return -EFAULT;
+                       return 0;
+               }
+               case Q_GETINFO: {
+                       struct if_dqinfo info;
+
+                       ret = sb->s_qcop->get_info(sb, type, &info);
+                       if (ret)
+                               return ret;
+                       if (copy_to_user(addr, &info, sizeof(info)))
+                               return -EFAULT;
+                       return 0;
+               }
+               case Q_SETINFO: {
+                       struct if_dqinfo info;
+
+                       if (copy_from_user(&info, addr, sizeof(info)))
+                               return -EFAULT;
+                       return sb->s_qcop->set_info(sb, type, &info);
+               }
+               case Q_GETQUOTA: {
+                       struct if_dqblk idq;
+
+                       ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
+                       if (ret)
+                               return ret;
+                       if (copy_to_user(addr, &idq, sizeof(idq)))
+                               return -EFAULT;
+                       return 0;
+               }
+               case Q_SETQUOTA: {
+                       struct if_dqblk idq;
+
+                       if (copy_from_user(&idq, addr, sizeof(idq)))
+                               return -EFAULT;
+                       return sb->s_qcop->set_dqblk(sb, type, id, &idq);
+               }
+               case Q_SYNC:
+                       sync_dquots(sb, type);
+                       return 0;
+
+               case Q_XQUOTAON:
+               case Q_XQUOTAOFF:
+               case Q_XQUOTARM: {
+                       __u32 flags;
+
+                       if (copy_from_user(&flags, addr, sizeof(flags)))
+                               return -EFAULT;
+                       return sb->s_qcop->set_xstate(sb, flags, cmd);
+               }
+               case Q_XGETQSTAT: {
+                       struct fs_quota_stat fqs;
+
+                       ret = sb->s_qcop->get_xstate(sb, &fqs);
+                       if (ret)
+                               return ret;
+                       if (copy_to_user(addr, &fqs, sizeof(fqs)))
+                               return -EFAULT;
+                       return 0;
+               }
+               case Q_XSETQLIM: {
+                       struct fs_disk_quota fdq;
+
+                       if (copy_from_user(&fdq, addr, sizeof(fdq)))
+                               return -EFAULT;
+                       return sb->s_qcop->set_xquota(sb, type, id, &fdq);
+               }
+               case Q_XGETQUOTA: {
+                       struct fs_disk_quota fdq;
+
+                       ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
+                       if (ret)
+                               return ret;
+                       if (copy_to_user(addr, &fdq, sizeof(fdq)))
+                               return -EFAULT;
+                       return 0;
+               }
+               case Q_XQUOTASYNC:
+                       return sb->s_qcop->quota_sync(sb, type);
+               /* We never reach here unless the validity check is broken */
+               default:
+                       BUG();
+       }
+       return 0;
+}
+
+/*
+ * look up a superblock on which quota ops will be performed
+ * - use the name of a block device to find the superblock thereon
+ */
+static struct super_block *quotactl_block(const char __user *special)
+{
+#ifdef CONFIG_BLOCK
+       struct block_device *bdev;
+       struct super_block *sb;
+       char *tmp = getname(special);
+
+       if (IS_ERR(tmp))
+               return ERR_CAST(tmp);
+       bdev = lookup_bdev(tmp);
+       putname(tmp);
+       if (IS_ERR(bdev))
+               return ERR_CAST(bdev);
+       sb = get_super(bdev);
+       bdput(bdev);
+       if (!sb)
+               return ERR_PTR(-ENODEV);
+
+       return sb;
+#else
+       return ERR_PTR(-ENODEV);
+#endif
+}
+
+/*
+ * This is the system call interface. This communicates with
+ * the user-level programs. Currently this only supports diskquota
+ * calls. Maybe we need to add the process quotas etc. in the future,
+ * but we probably should use rlimits for that.
+ */
+SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
+               qid_t, id, void __user *, addr)
+{
+       uint cmds, type;
+       struct super_block *sb = NULL;
+       int ret;
+
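+       /* quotactl(2) packs the subcommand into the high bits of 'cmd' and
+        * the quota type into the low byte (SUBCMDSHIFT / SUBCMDMASK). */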
+       cmds = cmd >> SUBCMDSHIFT;
+       type = cmd & SUBCMDMASK;
+
+       if (cmds != Q_SYNC || special) {
+               sb = quotactl_block(special);
+               if (IS_ERR(sb))
+                       return PTR_ERR(sb);
+       }
+
+       ret = check_quotactl_valid(sb, type, cmds, id);
+       if (ret >= 0)
+               ret = do_quotactl(sb, type, cmds, id, addr);
+       if (sb)
+               drop_super(sb);
+
+       return ret;
+}
+
+#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT)
+/*
+ * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64)
+ * and is necessary due to alignment problems.
+ */
+struct compat_if_dqblk {
+       compat_u64 dqb_bhardlimit;
+       compat_u64 dqb_bsoftlimit;
+       compat_u64 dqb_curspace;
+       compat_u64 dqb_ihardlimit;
+       compat_u64 dqb_isoftlimit;
+       compat_u64 dqb_curinodes;
+       compat_u64 dqb_btime;
+       compat_u64 dqb_itime;
+       compat_uint_t dqb_valid;
+};
+
+/* XFS structures */
+struct compat_fs_qfilestat {
+       compat_u64 dqb_bhardlimit;
+       compat_u64 qfs_nblks;
+       compat_uint_t qfs_nextents;
+};
+
+struct compat_fs_quota_stat {
+       __s8            qs_version;
+       __u16           qs_flags;
+       __s8            qs_pad;
+       struct compat_fs_qfilestat      qs_uquota;
+       struct compat_fs_qfilestat      qs_gquota;
+       compat_uint_t   qs_incoredqs;
+       compat_int_t    qs_btimelimit;
+       compat_int_t    qs_itimelimit;
+       compat_int_t    qs_rtbtimelimit;
+       __u16           qs_bwarnlimit;
+       __u16           qs_iwarnlimit;
+};
+
+asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
+                                               qid_t id, void __user *addr)
+{
+       unsigned int cmds;
+       struct if_dqblk __user *dqblk;
+       struct compat_if_dqblk __user *compat_dqblk;
+       struct fs_quota_stat __user *fsqstat;
+       struct compat_fs_quota_stat __user *compat_fsqstat;
+       compat_uint_t data;
+       u16 xdata;
+       long ret;
+
+       cmds = cmd >> SUBCMDSHIFT;
+
+       switch (cmds) {
+       case Q_GETQUOTA:
+               dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
+               compat_dqblk = addr;
+               ret = sys_quotactl(cmd, special, id, dqblk);
+               if (ret)
+                       break;
+               if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) ||
+                       get_user(data, &dqblk->dqb_valid) ||
+                       put_user(data, &compat_dqblk->dqb_valid))
+                       ret = -EFAULT;
+               break;
+       case Q_SETQUOTA:
+               dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
+               compat_dqblk = addr;
+               ret = -EFAULT;
+               if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) ||
+                       get_user(data, &compat_dqblk->dqb_valid) ||
+                       put_user(data, &dqblk->dqb_valid))
+                       break;
+               ret = sys_quotactl(cmd, special, id, dqblk);
+               break;
+       case Q_XGETQSTAT:
+               fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat));
+               compat_fsqstat = addr;
+               ret = sys_quotactl(cmd, special, id, fsqstat);
+               if (ret)
+                       break;
+               ret = -EFAULT;
+               /* Copying qs_version, qs_flags, qs_pad */
+               if (copy_in_user(compat_fsqstat, fsqstat,
+                       offsetof(struct compat_fs_quota_stat, qs_uquota)))
+                       break;
+               /* Copying qs_uquota */
+               if (copy_in_user(&compat_fsqstat->qs_uquota,
+                       &fsqstat->qs_uquota,
+                       sizeof(compat_fsqstat->qs_uquota)) ||
+                       get_user(data, &fsqstat->qs_uquota.qfs_nextents) ||
+                       put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents))
+                       break;
+               /* Copying qs_gquota */
+               if (copy_in_user(&compat_fsqstat->qs_gquota,
+                       &fsqstat->qs_gquota,
+                       sizeof(compat_fsqstat->qs_gquota)) ||
+                       get_user(data, &fsqstat->qs_gquota.qfs_nextents) ||
+                       put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents))
+                       break;
+               /* Copying the rest */
+               if (copy_in_user(&compat_fsqstat->qs_incoredqs,
+                       &fsqstat->qs_incoredqs,
+                       sizeof(struct compat_fs_quota_stat) -
+                       offsetof(struct compat_fs_quota_stat, qs_incoredqs)) ||
+                       get_user(xdata, &fsqstat->qs_iwarnlimit) ||
+                       put_user(xdata, &compat_fsqstat->qs_iwarnlimit))
+                       break;
+               ret = 0;
+               break;
+       default:
+               ret = sys_quotactl(cmd, special, id, addr);
+       }
+       return ret;
+}
+#endif
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
new file mode 100644 (file)
index 0000000..f81f4bc
--- /dev/null
@@ -0,0 +1,651 @@
+/*
+ *     vfsv0 quota IO operations on file
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/dqblk_v2.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/quotaops.h>
+
+#include <asm/byteorder.h>
+
+#include "quota_tree.h"
+
+MODULE_AUTHOR("Jan Kara");
+MODULE_DESCRIPTION("Quota trie support");
+MODULE_LICENSE("GPL");
+
+#define __QUOTA_QT_PARANOIA
+
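+/*
+ * The quota file is laid out as a radix tree ("trie") of blocks: each
+ * internal block holds dqi_usable_bs / 4 little-endian block references
+ * (hence the epb, "entries per block", below), and the leaves hold the
+ * on-disk dquot entries themselves. get_index() returns the id's digit
+ * (base epb) for the given tree depth, most significant digit first.
+ */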
+static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
+{
+       unsigned int epb = info->dqi_usable_bs >> 2;
+
+       depth = info->dqi_qtree_depth - depth - 1;
+       while (depth--)
+               id /= epb;
+       return id % epb;
+}
+
+/* Number of entries in one block */
+static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
+{
+       return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
+              / info->dqi_entry_size;
+}
+
+static char *getdqbuf(size_t size)
+{
+       char *buf = kmalloc(size, GFP_NOFS);
+       if (!buf)
+               printk(KERN_WARNING
+                      "VFS: Not enough memory for quota buffers.\n");
+       return buf;
+}
+
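+/*
+ * Block IO helpers: each block is dqi_usable_bs bytes long and is addressed
+ * by its number, i.e. byte offset blk << dqi_blocksize_bits in the quota
+ * file.
+ */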
+static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
+{
+       struct super_block *sb = info->dqi_sb;
+
+       memset(buf, 0, info->dqi_usable_bs);
+       return sb->s_op->quota_read(sb, info->dqi_type, buf,
+              info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+}
+
+static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
+{
+       struct super_block *sb = info->dqi_sb;
+
+       return sb->s_op->quota_write(sb, info->dqi_type, buf,
+              info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+}
+
+/* Remove empty block from list and return it */
+static int get_free_dqblk(struct qtree_mem_dqinfo *info)
+{
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
+       int ret, blk;
+
+       if (!buf)
+               return -ENOMEM;
+       if (info->dqi_free_blk) {
+               blk = info->dqi_free_blk;
+               ret = read_blk(info, blk, buf);
+               if (ret < 0)
+                       goto out_buf;
+               info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
+       } else {
+               memset(buf, 0, info->dqi_usable_bs);
+               /* Assure block allocation... */
+               ret = write_blk(info, info->dqi_blocks, buf);
+               if (ret < 0)
+                       goto out_buf;
+               blk = info->dqi_blocks++;
+       }
+       mark_info_dirty(info->dqi_sb, info->dqi_type);
+       ret = blk;
+out_buf:
+       kfree(buf);
+       return ret;
+}
+
+/* Insert an empty block into the list */
+static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
+{
+       struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
+       int err;
+
+       dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
+       dh->dqdh_prev_free = cpu_to_le32(0);
+       dh->dqdh_entries = cpu_to_le16(0);
+       err = write_blk(info, blk, buf);
+       if (err < 0)
+               return err;
+       info->dqi_free_blk = blk;
+       mark_info_dirty(info->dqi_sb, info->dqi_type);
+       return 0;
+}
+
+/* Remove given block from the list of blocks with free entries */
+static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+                              uint blk)
+{
+       char *tmpbuf = getdqbuf(info->dqi_usable_bs);
+       struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
+       uint nextblk = le32_to_cpu(dh->dqdh_next_free);
+       uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
+       int err;
+
+       if (!tmpbuf)
+               return -ENOMEM;
+       if (nextblk) {
+               err = read_blk(info, nextblk, tmpbuf);
+               if (err < 0)
+                       goto out_buf;
+               ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
+                                                       dh->dqdh_prev_free;
+               err = write_blk(info, nextblk, tmpbuf);
+               if (err < 0)
+                       goto out_buf;
+       }
+       if (prevblk) {
+               err = read_blk(info, prevblk, tmpbuf);
+               if (err < 0)
+                       goto out_buf;
+               ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
+                                                       dh->dqdh_next_free;
+               err = write_blk(info, prevblk, tmpbuf);
+               if (err < 0)
+                       goto out_buf;
+       } else {
+               info->dqi_free_entry = nextblk;
+               mark_info_dirty(info->dqi_sb, info->dqi_type);
+       }
+       kfree(tmpbuf);
+       dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
+       /* Whether or not the write succeeds, the block is out of the list */
+       if (write_blk(info, blk, buf) < 0)
+               printk(KERN_ERR
+                      "VFS: Can't write block (%u) with free entries.\n",
+                      blk);
+       return 0;
+out_buf:
+       kfree(tmpbuf);
+       return err;
+}
+
+/* Insert the given block at the beginning of the list with free entries */
+static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+                              uint blk)
+{
+       char *tmpbuf = getdqbuf(info->dqi_usable_bs);
+       struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
+       int err;
+
+       if (!tmpbuf)
+               return -ENOMEM;
+       dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
+       dh->dqdh_prev_free = cpu_to_le32(0);
+       err = write_blk(info, blk, buf);
+       if (err < 0)
+               goto out_buf;
+       if (info->dqi_free_entry) {
+               err = read_blk(info, info->dqi_free_entry, tmpbuf);
+               if (err < 0)
+                       goto out_buf;
+               ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
+                                                       cpu_to_le32(blk);
+               err = write_blk(info, info->dqi_free_entry, tmpbuf);
+               if (err < 0)
+                       goto out_buf;
+       }
+       kfree(tmpbuf);
+       info->dqi_free_entry = blk;
+       mark_info_dirty(info->dqi_sb, info->dqi_type);
+       return 0;
+out_buf:
+       kfree(tmpbuf);
+       return err;
+}
+
+/* Is the entry in the block free? */
+int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
+{
+       int i;
+
+       for (i = 0; i < info->dqi_entry_size; i++)
+               if (disk[i])
+                       return 0;
+       return 1;
+}
+EXPORT_SYMBOL(qtree_entry_unused);
+
+/* Find space for dquot */
+static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
+                             struct dquot *dquot, int *err)
+{
+       uint blk, i;
+       struct qt_disk_dqdbheader *dh;
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       char *ddquot;
+
+       *err = 0;
+       if (!buf) {
+               *err = -ENOMEM;
+               return 0;
+       }
+       dh = (struct qt_disk_dqdbheader *)buf;
+       if (info->dqi_free_entry) {
+               blk = info->dqi_free_entry;
+               *err = read_blk(info, blk, buf);
+               if (*err < 0)
+                       goto out_buf;
+       } else {
+               blk = get_free_dqblk(info);
+               if ((int)blk < 0) {
+                       *err = blk;
+                       kfree(buf);
+                       return 0;
+               }
+               memset(buf, 0, info->dqi_usable_bs);
+               /* This is enough as the block is already zeroed and the entry
+                * list is empty... */
+               info->dqi_free_entry = blk;
+               mark_info_dirty(dquot->dq_sb, dquot->dq_type);
+       }
+       /* Block will be full? */
+       if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
+               *err = remove_free_dqentry(info, buf, blk);
+               if (*err < 0) {
+                       printk(KERN_ERR "VFS: find_free_dqentry(): Can't "
+                              "remove block (%u) from entry free list.\n",
+                              blk);
+                       goto out_buf;
+               }
+       }
+       le16_add_cpu(&dh->dqdh_entries, 1);
+       /* Find free structure in block */
+       ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+       for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+               if (qtree_entry_unused(info, ddquot))
+                       break;
+               ddquot += info->dqi_entry_size;
+       }
+#ifdef __QUOTA_QT_PARANOIA
+       if (i == qtree_dqstr_in_blk(info)) {
+               printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
+                               "but it shouldn't.\n");
+               *err = -EIO;
+               goto out_buf;
+       }
+#endif
+       *err = write_blk(info, blk, buf);
+       if (*err < 0) {
+               printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
+                               "data block %u.\n", blk);
+               goto out_buf;
+       }
+       dquot->dq_off = (blk << info->dqi_blocksize_bits) +
+                       sizeof(struct qt_disk_dqdbheader) +
+                       i * info->dqi_entry_size;
+       kfree(buf);
+       return blk;
+out_buf:
+       kfree(buf);
+       return 0;
+}
+
+/* Insert reference to structure into the trie */
+static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+                         uint *treeblk, int depth)
+{
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       int ret = 0, newson = 0, newact = 0;
+       __le32 *ref;
+       uint newblk;
+
+       if (!buf)
+               return -ENOMEM;
+       if (!*treeblk) {
+               ret = get_free_dqblk(info);
+               if (ret < 0)
+                       goto out_buf;
+               *treeblk = ret;
+               memset(buf, 0, info->dqi_usable_bs);
+               newact = 1;
+       } else {
+               ret = read_blk(info, *treeblk, buf);
+               if (ret < 0) {
+                       printk(KERN_ERR "VFS: Can't read tree quota block "
+                                       "%u.\n", *treeblk);
+                       goto out_buf;
+               }
+       }
+       ref = (__le32 *)buf;
+       newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+       if (!newblk)
+               newson = 1;
+       if (depth == info->dqi_qtree_depth - 1) {
+#ifdef __QUOTA_QT_PARANOIA
+               if (newblk) {
+                       printk(KERN_ERR "VFS: Inserting already present quota "
+                                       "entry (block %u).\n",
+                              le32_to_cpu(ref[get_index(info,
+                                               dquot->dq_id, depth)]));
+                       ret = -EIO;
+                       goto out_buf;
+               }
+#endif
+               newblk = find_free_dqentry(info, dquot, &ret);
+       } else {
+               ret = do_insert_tree(info, dquot, &newblk, depth+1);
+       }
+       if (newson && ret >= 0) {
+               ref[get_index(info, dquot->dq_id, depth)] =
+                                                       cpu_to_le32(newblk);
+               ret = write_blk(info, *treeblk, buf);
+       } else if (newact && ret < 0) {
+               put_free_dqblk(info, buf, *treeblk);
+       }
+out_buf:
+       kfree(buf);
+       return ret;
+}
+
+/* Wrapper for inserting quota structure into tree */
+static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
+                                struct dquot *dquot)
+{
+       int tmp = QT_TREEOFF;
+       return do_insert_tree(info, dquot, &tmp, 0);
+}
+
+/*
+ * We don't have to be afraid of deadlocks as we never have quotas on quota
+ * files...
+ */
+int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+{
+       int type = dquot->dq_type;
+       struct super_block *sb = dquot->dq_sb;
+       ssize_t ret;
+       char *ddquot = getdqbuf(info->dqi_entry_size);
+
+       if (!ddquot)
+               return -ENOMEM;
+
+       /* dq_off is guarded by dqio_mutex */
+       if (!dquot->dq_off) {
+               ret = dq_insert_tree(info, dquot);
+               if (ret < 0) {
+                       printk(KERN_ERR "VFS: Error %zd occurred while "
+                                       "creating quota.\n", ret);
+                       kfree(ddquot);
+                       return ret;
+               }
+       }
+       spin_lock(&dq_data_lock);
+       info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
+       spin_unlock(&dq_data_lock);
+       ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
+                                   dquot->dq_off);
+       if (ret != info->dqi_entry_size) {
+               printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
+                      sb->s_id);
+               if (ret >= 0)
+                       ret = -ENOSPC;
+       } else {
+               ret = 0;
+       }
+       dqstats.writes++;
+       kfree(ddquot);
+
+       return ret;
+}
+EXPORT_SYMBOL(qtree_write_dquot);
+
+/* Free dquot entry in data block */
+static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+                       uint blk)
+{
+       struct qt_disk_dqdbheader *dh;
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       int ret = 0;
+
+       if (!buf)
+               return -ENOMEM;
+       if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
+               printk(KERN_ERR "VFS: Quota structure has offset to other "
+                 "block (%u) than it should (%u).\n", blk,
+                 (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
+               goto out_buf;
+       }
+       ret = read_blk(info, blk, buf);
+       if (ret < 0) {
+               printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
+               goto out_buf;
+       }
+       dh = (struct qt_disk_dqdbheader *)buf;
+       le16_add_cpu(&dh->dqdh_entries, -1);
+       if (!le16_to_cpu(dh->dqdh_entries)) {   /* Block got free? */
+               ret = remove_free_dqentry(info, buf, blk);
+               if (ret >= 0)
+                       ret = put_free_dqblk(info, buf, blk);
+               if (ret < 0) {
+                       printk(KERN_ERR "VFS: Can't move quota data block (%u) "
+                         "to free list.\n", blk);
+                       goto out_buf;
+               }
+       } else {
+               memset(buf +
+                      (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
+                      0, info->dqi_entry_size);
+               if (le16_to_cpu(dh->dqdh_entries) ==
+                   qtree_dqstr_in_blk(info) - 1) {
+                       /* Insert will write block itself */
+                       ret = insert_free_dqentry(info, buf, blk);
+                       if (ret < 0) {
+                               printk(KERN_ERR "VFS: Can't insert quota data "
+                                      "block (%u) to free entry list.\n", blk);
+                               goto out_buf;
+                       }
+               } else {
+                       ret = write_blk(info, blk, buf);
+                       if (ret < 0) {
+                               printk(KERN_ERR "VFS: Can't write quota data "
+                                 "block %u\n", blk);
+                               goto out_buf;
+                       }
+               }
+       }
+       dquot->dq_off = 0;      /* Quota is now unattached */
+out_buf:
+       kfree(buf);
+       return ret;
+}
+
+/* Remove reference to dquot from tree */
+static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+                      uint *blk, int depth)
+{
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       int ret = 0;
+       uint newblk;
+       __le32 *ref = (__le32 *)buf;
+
+       if (!buf)
+               return -ENOMEM;
+       ret = read_blk(info, *blk, buf);
+       if (ret < 0) {
+               printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
+               goto out_buf;
+       }
+       newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+       if (depth == info->dqi_qtree_depth - 1) {
+               ret = free_dqentry(info, dquot, newblk);
+               newblk = 0;
+       } else {
+               ret = remove_tree(info, dquot, &newblk, depth+1);
+       }
+       if (ret >= 0 && !newblk) {
+               int i;
+               ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
+               /* Block got empty? */
+               for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
+                       ;
+               /* Don't put the root block into the free block list */
+               if (i == (info->dqi_usable_bs >> 2)
+                   && *blk != QT_TREEOFF) {
+                       put_free_dqblk(info, buf, *blk);
+                       *blk = 0;
+               } else {
+                       ret = write_blk(info, *blk, buf);
+                       if (ret < 0)
+                               printk(KERN_ERR "VFS: Can't write quota tree "
+                                 "block %u.\n", *blk);
+               }
+       }
+out_buf:
+       kfree(buf);
+       return ret;
+}
+
+/* Delete dquot from tree */
+int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+{
+       uint tmp = QT_TREEOFF;
+
+       if (!dquot->dq_off)     /* Not even allocated? */
+               return 0;
+       return remove_tree(info, dquot, &tmp, 0);
+}
+EXPORT_SYMBOL(qtree_delete_dquot);
+
+/* Find entry in block */
+static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
+                                struct dquot *dquot, uint blk)
+{
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       loff_t ret = 0;
+       int i;
+       char *ddquot;
+
+       if (!buf)
+               return -ENOMEM;
+       ret = read_blk(info, blk, buf);
+       if (ret < 0) {
+               printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+               goto out_buf;
+       }
+       ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+       for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+               if (info->dqi_ops->is_id(ddquot, dquot))
+                       break;
+               ddquot += info->dqi_entry_size;
+       }
+       if (i == qtree_dqstr_in_blk(info)) {
+               printk(KERN_ERR "VFS: Quota for id %u referenced "
+                 "but not present.\n", dquot->dq_id);
+               ret = -EIO;
+               goto out_buf;
+       } else {
+               ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
+                 qt_disk_dqdbheader) + i * info->dqi_entry_size;
+       }
+out_buf:
+       kfree(buf);
+       return ret;
+}
+
+/* Find entry for given id in the tree */
+static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
+                               struct dquot *dquot, uint blk, int depth)
+{
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       loff_t ret = 0;
+       __le32 *ref = (__le32 *)buf;
+
+       if (!buf)
+               return -ENOMEM;
+       ret = read_blk(info, blk, buf);
+       if (ret < 0) {
+               printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+               goto out_buf;
+       }
+       ret = 0;
+       blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+       if (!blk)       /* No reference? */
+               goto out_buf;
+       if (depth < info->dqi_qtree_depth - 1)
+               ret = find_tree_dqentry(info, dquot, blk, depth+1);
+       else
+               ret = find_block_dqentry(info, dquot, blk);
+out_buf:
+       kfree(buf);
+       return ret;
+}
+
+/* Find entry for given id in the tree - wrapper function */
+static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
+                                 struct dquot *dquot)
+{
+       return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
+}
+
+int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+{
+       int type = dquot->dq_type;
+       struct super_block *sb = dquot->dq_sb;
+       loff_t offset;
+       char *ddquot;
+       int ret = 0;
+
+#ifdef __QUOTA_QT_PARANOIA
+       /* Invalidated quota? */
+       if (!sb_dqopt(dquot->dq_sb)->files[type]) {
+               printk(KERN_ERR "VFS: Quota invalidated while reading!\n");
+               return -EIO;
+       }
+#endif
+       /* Do we know offset of the dquot entry in the quota file? */
+       if (!dquot->dq_off) {
+               offset = find_dqentry(info, dquot);
+               if (offset <= 0) {      /* Entry not present? */
+                       if (offset < 0)
+                               printk(KERN_ERR "VFS: Can't read quota "
+                                 "structure for id %u.\n", dquot->dq_id);
+                       dquot->dq_off = 0;
+                       set_bit(DQ_FAKE_B, &dquot->dq_flags);
+                       memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
+                       ret = offset;
+                       goto out;
+               }
+               dquot->dq_off = offset;
+       }
+       ddquot = getdqbuf(info->dqi_entry_size);
+       if (!ddquot)
+               return -ENOMEM;
+       ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
+                                  dquot->dq_off);
+       if (ret != info->dqi_entry_size) {
+               if (ret >= 0)
+                       ret = -EIO;
+               printk(KERN_ERR "VFS: Error while reading quota "
+                               "structure for id %u.\n", dquot->dq_id);
+               set_bit(DQ_FAKE_B, &dquot->dq_flags);
+               memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
+               kfree(ddquot);
+               goto out;
+       }
+       spin_lock(&dq_data_lock);
+       info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
+       if (!dquot->dq_dqb.dqb_bhardlimit &&
+           !dquot->dq_dqb.dqb_bsoftlimit &&
+           !dquot->dq_dqb.dqb_ihardlimit &&
+           !dquot->dq_dqb.dqb_isoftlimit)
+               set_bit(DQ_FAKE_B, &dquot->dq_flags);
+       spin_unlock(&dq_data_lock);
+       kfree(ddquot);
+out:
+       dqstats.reads++;
+       return ret;
+}
+EXPORT_SYMBOL(qtree_read_dquot);
+
+/* Check whether dquot should not be deleted. We know we are
+ * the only one operating on dquot (thanks to dq_lock) */
+int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+{
+       if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
+           !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
+               return qtree_delete_dquot(info, dquot);
+       return 0;
+}
+EXPORT_SYMBOL(qtree_release_dquot);
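
For reference, a minimal userspace sketch (not part of this patch) of the offset
arithmetic that find_free_dqentry() and find_block_dqentry() above use to fill
dquot->dq_off: the data block number shifted by the blocksize bits, plus the
block header, plus the slot index times the entry size. The 1024/16/48-byte
figures are the vfsv0 (v2) parameters set up in quota_v2.c further down and are
assumed here, not derived.

/*
 * Illustrative userspace sketch: leaf-block offset as stored in dq_off.
 * Assumes the v2 defaults from this series: 1024-byte blocks, a 16-byte
 * qt_disk_dqdbheader and 48-byte on-disk entries.
 */
#include <stdio.h>
#include <stdint.h>

#define BLOCKSIZE_BITS  10      /* V2_DQBLKSIZE_BITS */
#define DQDBHEADER_SIZE 16      /* sizeof(struct qt_disk_dqdbheader) */
#define ENTRY_SIZE      48      /* sizeof(struct v2_disk_dqblk) */

/* Byte offset of slot 'i' in data block 'blk' of the quota file */
static uint64_t dqoff(uint32_t blk, unsigned int i)
{
        return ((uint64_t)blk << BLOCKSIZE_BITS) + DQDBHEADER_SIZE +
               (uint64_t)i * ENTRY_SIZE;
}

int main(void)
{
        /* Entry 0 of data block 5 lives at byte 5*1024 + 16 = 5136 */
        printf("blk 5, slot 0  -> offset %llu\n",
               (unsigned long long)dqoff(5, 0));
        /* Last (21st) entry of the same block: 5136 + 20*48 = 6096 */
        printf("blk 5, slot 20 -> offset %llu\n",
               (unsigned long long)dqoff(5, 20));
        return 0;
}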
diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h
new file mode 100644 (file)
index 0000000..a1ab8db
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ *     Definitions of structures for vfsv0 quota format
+ */
+
+#ifndef _LINUX_QUOTA_TREE_H
+#define _LINUX_QUOTA_TREE_H
+
+#include <linux/types.h>
+#include <linux/quota.h>
+
+/*
+ *  Structure of header of block with quota structures. It is padded to 16 bytes so
+ *  there will be space for exactly 21 quota-entries in a block
+ */
+struct qt_disk_dqdbheader {
+       __le32 dqdh_next_free;  /* Number of next block with free entry */
+       __le32 dqdh_prev_free;  /* Number of previous block with free entry */
+       __le16 dqdh_entries;    /* Number of valid entries in block */
+       __le16 dqdh_pad1;
+       __le32 dqdh_pad2;
+};
+
+#define QT_TREEOFF     1               /* Offset of tree in file in blocks */
+
+#endif /* _LINUX_QUOTA_TREE_H */
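
A quick standalone check (not part of the patch) of the "exactly 21
quota-entries" claim in the comment above, assuming the v2 parameters used
elsewhere in this series: 1024-byte tree blocks and the 48-byte v2_disk_dqblk
from quotaio_v2.h. It performs the same division as qtree_dqstr_in_blk().

/*
 * Verify (1024 - 16) / 48 == 21 outside the kernel.  Block and entry sizes
 * are assumptions taken from quota_v2.c / quotaio_v2.h in this patch.
 */
#include <stdio.h>
#include <stdint.h>

struct qt_disk_dqdbheader {
        uint32_t dqdh_next_free;
        uint32_t dqdh_prev_free;
        uint16_t dqdh_entries;
        uint16_t dqdh_pad1;
        uint32_t dqdh_pad2;
};                              /* 16 bytes, no padding */

int main(void)
{
        unsigned int block = 1 << 10;   /* V2_DQBLKSIZE_BITS */
        unsigned int entry = 48;        /* sizeof(struct v2_disk_dqblk) */
        unsigned int n = (block - sizeof(struct qt_disk_dqdbheader)) / entry;

        /* Prints 21, matching qtree_dqstr_in_blk() for the v2 format */
        printf("entries per data block: %u\n", n);
        return 0;
}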
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
new file mode 100644 (file)
index 0000000..0edcf42
--- /dev/null
@@ -0,0 +1,234 @@
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/quotaops.h>
+#include <linux/dqblk_v1.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/byteorder.h>
+
+#include "quotaio_v1.h"
+
+MODULE_AUTHOR("Jan Kara");
+MODULE_DESCRIPTION("Old quota format support");
+MODULE_LICENSE("GPL");
+
+#define QUOTABLOCK_BITS 10
+#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
+
+static inline qsize_t v1_stoqb(qsize_t space)
+{
+       return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
+}
+
+static inline qsize_t v1_qbtos(qsize_t blocks)
+{
+       return blocks << QUOTABLOCK_BITS;
+}
+
+static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d)
+{
+       m->dqb_ihardlimit = d->dqb_ihardlimit;
+       m->dqb_isoftlimit = d->dqb_isoftlimit;
+       m->dqb_curinodes = d->dqb_curinodes;
+       m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit);
+       m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit);
+       m->dqb_curspace = v1_qbtos(d->dqb_curblocks);
+       m->dqb_itime = d->dqb_itime;
+       m->dqb_btime = d->dqb_btime;
+}
+
+static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m)
+{
+       d->dqb_ihardlimit = m->dqb_ihardlimit;
+       d->dqb_isoftlimit = m->dqb_isoftlimit;
+       d->dqb_curinodes = m->dqb_curinodes;
+       d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit);
+       d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit);
+       d->dqb_curblocks = v1_stoqb(m->dqb_curspace);
+       d->dqb_itime = m->dqb_itime;
+       d->dqb_btime = m->dqb_btime;
+}
+
+static int v1_read_dqblk(struct dquot *dquot)
+{
+       int type = dquot->dq_type;
+       struct v1_disk_dqblk dqblk;
+
+       if (!sb_dqopt(dquot->dq_sb)->files[type])
+               return -EINVAL;
+
+       /* Set structure to 0s in case read fails/is after end of file */
+       memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
+       dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
+                       sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+
+       v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
+       if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
+           dquot->dq_dqb.dqb_bsoftlimit == 0 &&
+           dquot->dq_dqb.dqb_ihardlimit == 0 &&
+           dquot->dq_dqb.dqb_isoftlimit == 0)
+               set_bit(DQ_FAKE_B, &dquot->dq_flags);
+       dqstats.reads++;
+
+       return 0;
+}
+
+static int v1_commit_dqblk(struct dquot *dquot)
+{
+       short type = dquot->dq_type;
+       ssize_t ret;
+       struct v1_disk_dqblk dqblk;
+
+       v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
+       if (dquot->dq_id == 0) {
+               dqblk.dqb_btime =
+                       sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
+               dqblk.dqb_itime =
+                       sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
+       }
+       ret = 0;
+       if (sb_dqopt(dquot->dq_sb)->files[type])
+               ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
+                       (char *)&dqblk, sizeof(struct v1_disk_dqblk),
+                       v1_dqoff(dquot->dq_id));
+       if (ret != sizeof(struct v1_disk_dqblk)) {
+               printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
+                       dquot->dq_sb->s_id);
+               if (ret >= 0)
+                       ret = -EIO;
+               goto out;
+       }
+       ret = 0;
+
+out:
+       dqstats.writes++;
+
+       return ret;
+}
+
+/* Magics of new quota format */
+#define V2_INITQMAGICS {\
+       0xd9c01f11,     /* USRQUOTA */\
+       0xd9c01927      /* GRPQUOTA */\
+}
+
+/* Header of new quota format */
+struct v2_disk_dqheader {
+       __le32 dqh_magic;        /* Magic number identifying file */
+       __le32 dqh_version;      /* File version */
+};
+
+static int v1_check_quota_file(struct super_block *sb, int type)
+{
+       struct inode *inode = sb_dqopt(sb)->files[type];
+       ulong blocks;
+       size_t off; 
+       struct v2_disk_dqheader dqhead;
+       ssize_t size;
+       loff_t isize;
+       static const uint quota_magics[] = V2_INITQMAGICS;
+
+       isize = i_size_read(inode);
+       if (!isize)
+               return 0;
+       blocks = isize >> BLOCK_SIZE_BITS;
+       off = isize & (BLOCK_SIZE - 1);
+       if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
+           sizeof(struct v1_disk_dqblk))
+               return 0;
+       /* Double-check that we didn't get a file in the new format - with the
+        * old quotactl() this could happen */
+       size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+                                   sizeof(struct v2_disk_dqheader), 0);
+       if (size != sizeof(struct v2_disk_dqheader))
+               return 1;       /* Probably not new format */
+       if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
+               return 1;       /* Definitely not new format */
+       printk(KERN_INFO
+              "VFS: %s: Refusing to turn on old quota format on given file."
+              " It probably contains newer quota format.\n", sb->s_id);
+        return 0;              /* Seems like a new format file -> refuse it */
+}
+
+static int v1_read_file_info(struct super_block *sb, int type)
+{
+       struct quota_info *dqopt = sb_dqopt(sb);
+       struct v1_disk_dqblk dqblk;
+       int ret;
+
+       ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+                               sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+       if (ret != sizeof(struct v1_disk_dqblk)) {
+               if (ret >= 0)
+                       ret = -EIO;
+               goto out;
+       }
+       ret = 0;
+       /* limits are stored as unsigned 32-bit data */
+       dqopt->info[type].dqi_maxblimit = 0xffffffff;
+       dqopt->info[type].dqi_maxilimit = 0xffffffff;
+       dqopt->info[type].dqi_igrace =
+                       dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
+       dqopt->info[type].dqi_bgrace =
+                       dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
+out:
+       return ret;
+}
+
+static int v1_write_file_info(struct super_block *sb, int type)
+{
+       struct quota_info *dqopt = sb_dqopt(sb);
+       struct v1_disk_dqblk dqblk;
+       int ret;
+
+       dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
+       ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+                               sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+       if (ret != sizeof(struct v1_disk_dqblk)) {
+               if (ret >= 0)
+                       ret = -EIO;
+               goto out;
+       }
+       dqblk.dqb_itime = dqopt->info[type].dqi_igrace;
+       dqblk.dqb_btime = dqopt->info[type].dqi_bgrace;
+       ret = sb->s_op->quota_write(sb, type, (char *)&dqblk,
+             sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+       if (ret == sizeof(struct v1_disk_dqblk))
+               ret = 0;
+       else if (ret > 0)
+               ret = -EIO;
+out:
+       return ret;
+}
+
+static struct quota_format_ops v1_format_ops = {
+       .check_quota_file       = v1_check_quota_file,
+       .read_file_info         = v1_read_file_info,
+       .write_file_info        = v1_write_file_info,
+       .free_file_info         = NULL,
+       .read_dqblk             = v1_read_dqblk,
+       .commit_dqblk           = v1_commit_dqblk,
+};
+
+static struct quota_format_type v1_quota_format = {
+       .qf_fmt_id      = QFMT_VFS_OLD,
+       .qf_ops         = &v1_format_ops,
+       .qf_owner       = THIS_MODULE
+};
+
+static int __init init_v1_quota_format(void)
+{
+        return register_quota_format(&v1_quota_format);
+}
+
+static void __exit exit_v1_quota_format(void)
+{
+        unregister_quota_format(&v1_quota_format);
+}
+
+module_init(init_v1_quota_format);
+module_exit(exit_v1_quota_format);
+
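
The only unit conversion the old format does is between bytes and fixed 1 KiB
"quota blocks" (QUOTABLOCK_BITS == 10). A small standalone sketch, mirroring
v1_qbtos()/v1_stoqb() above; the rounding direction (space to blocks rounds up)
is the detail to notice.

/* Userspace sketch of the v1 block <-> byte conversions. */
#include <stdio.h>
#include <stdint.h>

#define QUOTABLOCK_BITS 10
#define QUOTABLOCK_SIZE (1ULL << QUOTABLOCK_BITS)

static uint64_t qbtos(uint64_t blocks)          /* blocks -> bytes */
{
        return blocks << QUOTABLOCK_BITS;
}

static uint64_t stoqb(uint64_t space)           /* bytes -> blocks, rounded up */
{
        return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
}

int main(void)
{
        printf("3 blocks   = %llu bytes\n", (unsigned long long)qbtos(3));
        printf("1 byte     = %llu block(s)\n", (unsigned long long)stoqb(1));
        printf("1025 bytes = %llu block(s)\n", (unsigned long long)stoqb(1025));
        return 0;
}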
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
new file mode 100644 (file)
index 0000000..a5475fb
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ *     vfsv0 quota IO operations on file
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/dqblk_v2.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/quotaops.h>
+
+#include <asm/byteorder.h>
+
+#include "quota_tree.h"
+#include "quotaio_v2.h"
+
+MODULE_AUTHOR("Jan Kara");
+MODULE_DESCRIPTION("Quota format v2 support");
+MODULE_LICENSE("GPL");
+
+#define __QUOTA_V2_PARANOIA
+
+static void v2_mem2diskdqb(void *dp, struct dquot *dquot);
+static void v2_disk2memdqb(struct dquot *dquot, void *dp);
+static int v2_is_id(void *dp, struct dquot *dquot);
+
+static struct qtree_fmt_operations v2_qtree_ops = {
+       .mem2disk_dqblk = v2_mem2diskdqb,
+       .disk2mem_dqblk = v2_disk2memdqb,
+       .is_id = v2_is_id,
+};
+
+#define QUOTABLOCK_BITS 10
+#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
+
+static inline qsize_t v2_stoqb(qsize_t space)
+{
+       return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
+}
+
+static inline qsize_t v2_qbtos(qsize_t blocks)
+{
+       return blocks << QUOTABLOCK_BITS;
+}
+
+/* Check whether given file is really vfsv0 quotafile */
+static int v2_check_quota_file(struct super_block *sb, int type)
+{
+       struct v2_disk_dqheader dqhead;
+       ssize_t size;
+       static const uint quota_magics[] = V2_INITQMAGICS;
+       static const uint quota_versions[] = V2_INITQVERSIONS;
+       size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+                                   sizeof(struct v2_disk_dqheader), 0);
+       if (size != sizeof(struct v2_disk_dqheader)) {
+               printk("quota_v2: failed read expected=%zd got=%zd\n",
+                       sizeof(struct v2_disk_dqheader), size);
+               return 0;
+       }
+       if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] ||
+           le32_to_cpu(dqhead.dqh_version) != quota_versions[type])
+               return 0;
+       return 1;
+}
+
+/* Read information header from quota file */
+static int v2_read_file_info(struct super_block *sb, int type)
+{
+       struct v2_disk_dqinfo dinfo;
+       struct mem_dqinfo *info = sb_dqinfo(sb, type);
+       struct qtree_mem_dqinfo *qinfo;
+       ssize_t size;
+
+       size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
+              sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
+       if (size != sizeof(struct v2_disk_dqinfo)) {
+               printk(KERN_WARNING "Can't read info structure on device %s.\n",
+                       sb->s_id);
+               return -1;
+       }
+       info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS);
+       if (!info->dqi_priv) {
+               printk(KERN_WARNING
+                      "Not enough memory for quota information structure.\n");
+               return -1;
+       }
+       qinfo = info->dqi_priv;
+       /* limits are stored as unsigned 32-bit data */
+       info->dqi_maxblimit = 0xffffffff;
+       info->dqi_maxilimit = 0xffffffff;
+       info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
+       info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
+       info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
+       qinfo->dqi_sb = sb;
+       qinfo->dqi_type = type;
+       qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
+       qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
+       qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
+       qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS;
+       qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS;
+       qinfo->dqi_qtree_depth = qtree_depth(qinfo);
+       qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk);
+       qinfo->dqi_ops = &v2_qtree_ops;
+       return 0;
+}
+
+/* Write information header to quota file */
+static int v2_write_file_info(struct super_block *sb, int type)
+{
+       struct v2_disk_dqinfo dinfo;
+       struct mem_dqinfo *info = sb_dqinfo(sb, type);
+       struct qtree_mem_dqinfo *qinfo = info->dqi_priv;
+       ssize_t size;
+
+       spin_lock(&dq_data_lock);
+       info->dqi_flags &= ~DQF_INFO_DIRTY;
+       dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
+       dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
+       dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
+       spin_unlock(&dq_data_lock);
+       dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
+       dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
+       dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry);
+       size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
+              sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
+       if (size != sizeof(struct v2_disk_dqinfo)) {
+               printk(KERN_WARNING "Can't write info structure on device %s.\n",
+                       sb->s_id);
+               return -1;
+       }
+       return 0;
+}
+
+static void v2_disk2memdqb(struct dquot *dquot, void *dp)
+{
+       struct v2_disk_dqblk *d = dp, empty;
+       struct mem_dqblk *m = &dquot->dq_dqb;
+
+       m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit);
+       m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit);
+       m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes);
+       m->dqb_itime = le64_to_cpu(d->dqb_itime);
+       m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit));
+       m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit));
+       m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
+       m->dqb_btime = le64_to_cpu(d->dqb_btime);
+       /* We need to escape back all-zero structure */
+       memset(&empty, 0, sizeof(struct v2_disk_dqblk));
+       empty.dqb_itime = cpu_to_le64(1);
+       if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk)))
+               m->dqb_itime = 0;
+}
+
+static void v2_mem2diskdqb(void *dp, struct dquot *dquot)
+{
+       struct v2_disk_dqblk *d = dp;
+       struct mem_dqblk *m = &dquot->dq_dqb;
+       struct qtree_mem_dqinfo *info =
+                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+
+       d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit);
+       d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit);
+       d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes);
+       d->dqb_itime = cpu_to_le64(m->dqb_itime);
+       d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit));
+       d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit));
+       d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
+       d->dqb_btime = cpu_to_le64(m->dqb_btime);
+       d->dqb_id = cpu_to_le32(dquot->dq_id);
+       if (qtree_entry_unused(info, dp))
+               d->dqb_itime = cpu_to_le64(1);
+}
+
+static int v2_is_id(void *dp, struct dquot *dquot)
+{
+       struct v2_disk_dqblk *d = dp;
+       struct qtree_mem_dqinfo *info =
+                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+
+       if (qtree_entry_unused(info, dp))
+               return 0;
+       return le32_to_cpu(d->dqb_id) == dquot->dq_id;
+}
+
+static int v2_read_dquot(struct dquot *dquot)
+{
+       return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
+}
+
+static int v2_write_dquot(struct dquot *dquot)
+{
+       return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
+}
+
+static int v2_release_dquot(struct dquot *dquot)
+{
+       return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
+}
+
+static int v2_free_file_info(struct super_block *sb, int type)
+{
+       kfree(sb_dqinfo(sb, type)->dqi_priv);
+       return 0;
+}
+
+static struct quota_format_ops v2_format_ops = {
+       .check_quota_file       = v2_check_quota_file,
+       .read_file_info         = v2_read_file_info,
+       .write_file_info        = v2_write_file_info,
+       .free_file_info         = v2_free_file_info,
+       .read_dqblk             = v2_read_dquot,
+       .commit_dqblk           = v2_write_dquot,
+       .release_dqblk          = v2_release_dquot,
+};
+
+static struct quota_format_type v2_quota_format = {
+       .qf_fmt_id      = QFMT_VFS_V0,
+       .qf_ops         = &v2_format_ops,
+       .qf_owner       = THIS_MODULE
+};
+
+static int __init init_v2_quota_format(void)
+{
+       return register_quota_format(&v2_quota_format);
+}
+
+static void __exit exit_v2_quota_format(void)
+{
+       unregister_quota_format(&v2_quota_format);
+}
+
+module_init(init_v2_quota_format);
+module_exit(exit_v2_quota_format);
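
The v2 code above plugs into the generic tree code through qtree_mem_dqinfo:
1024-byte blocks give 1024/4 = 256 block references per tree level, and
qtree_depth() then works out to a depth of 4 for 32-bit ids (256^4 = 2^32).
The standalone sketch below reproduces the get_index() slot calculation from
quota_tree.c with those two values hard-coded as assumptions, showing how an
id decomposes into one slot per level of the radix tree walked by
do_insert_tree() and find_tree_dqentry().

/* Userspace sketch: id -> per-level slot indices, v2 defaults assumed. */
#include <stdio.h>

#define EPB   256   /* references per tree block: dqi_usable_bs >> 2 */
#define DEPTH 4     /* qtree depth for 32-bit ids when EPB == 256 */

static unsigned int get_index(unsigned int id, int depth)
{
        int d = DEPTH - depth - 1;

        while (d--)
                id /= EPB;
        return id % EPB;
}

int main(void)
{
        unsigned int id = 1000;

        for (int depth = 0; depth < DEPTH; depth++)
                printf("depth %d -> slot %u\n", depth, get_index(id, depth));
        /* id 1000 = 0x000003e8 -> slots 0, 0, 3, 232 */
        return 0;
}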
diff --git a/fs/quota/quotaio_v1.h b/fs/quota/quotaio_v1.h
new file mode 100644 (file)
index 0000000..746654b
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _LINUX_QUOTAIO_V1_H
+#define _LINUX_QUOTAIO_V1_H
+
+#include <linux/types.h>
+
+/*
+ * The following constants define the amount of time given a user
+ * before the soft limits are treated as hard limits (usually resulting
+ * in an allocation failure). The timer is started when the user crosses
+ * their soft limit; it is reset when they go below their soft limit.
+ */
+#define MAX_IQ_TIME  604800    /* (7*24*60*60) 1 week */
+#define MAX_DQ_TIME  604800    /* (7*24*60*60) 1 week */
+
+/*
+ * The following structure defines the format of the disk quota file
+ * (as it appears on disk) - the file is an array of these structures
+ * indexed by user or group number.
+ */
+struct v1_disk_dqblk {
+       __u32 dqb_bhardlimit;   /* absolute limit on disk blks alloc */
+       __u32 dqb_bsoftlimit;   /* preferred limit on disk blks */
+       __u32 dqb_curblocks;    /* current block count */
+       __u32 dqb_ihardlimit;   /* absolute limit on allocated inodes */
+       __u32 dqb_isoftlimit;   /* preferred inode limit */
+       __u32 dqb_curinodes;    /* current # allocated inodes */
+       time_t dqb_btime;       /* time limit for excessive disk use */
+       time_t dqb_itime;       /* time limit for excessive inode use */
+};
+
+#define v1_dqoff(UID)      ((loff_t)((UID) * sizeof (struct v1_disk_dqblk)))
+
+#endif /* _LINUX_QUOTAIO_V1_H */
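
A standalone sketch (not part of the patch) of the flat layout this header
describes: the old-format quota file is a bare array of v1_disk_dqblk records
indexed by uid/gid, so v1_dqoff() is a plain multiplication. The record size
depends on the host's time_t width, so the sketch prints it rather than
hard-coding a value.

/* Userspace sketch of the v1 flat-array layout. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct v1_disk_dqblk {
        uint32_t dqb_bhardlimit;
        uint32_t dqb_bsoftlimit;
        uint32_t dqb_curblocks;
        uint32_t dqb_ihardlimit;
        uint32_t dqb_isoftlimit;
        uint32_t dqb_curinodes;
        time_t dqb_btime;
        time_t dqb_itime;
};

/* Mirrors the v1_dqoff() macro above */
static long long v1_dqoff(unsigned int id)
{
        return (long long)id * (long long)sizeof(struct v1_disk_dqblk);
}

int main(void)
{
        printf("record size: %zu bytes\n", sizeof(struct v1_disk_dqblk));
        printf("uid 0    -> offset %lld\n", v1_dqoff(0));
        printf("uid 1000 -> offset %lld\n", v1_dqoff(1000));
        return 0;
}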
diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h
new file mode 100644 (file)
index 0000000..530fe58
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ *     Definitions of structures for vfsv0 quota format
+ */
+
+#ifndef _LINUX_QUOTAIO_V2_H
+#define _LINUX_QUOTAIO_V2_H
+
+#include <linux/types.h>
+#include <linux/quota.h>
+
+/*
+ * Definitions of magics and versions of current quota files
+ */
+#define V2_INITQMAGICS {\
+       0xd9c01f11,     /* USRQUOTA */\
+       0xd9c01927      /* GRPQUOTA */\
+}
+
+#define V2_INITQVERSIONS {\
+       0,              /* USRQUOTA */\
+       0               /* GRPQUOTA */\
+}
+
+/* First generic header */
+struct v2_disk_dqheader {
+       __le32 dqh_magic;       /* Magic number identifying file */
+       __le32 dqh_version;     /* File version */
+};
+
+/*
+ * The following structure defines the format of the disk quota file
+ * (as it appears on disk) - the file is a radix tree whose leaves point
+ * to blocks of these structures.
+ */
+struct v2_disk_dqblk {
+       __le32 dqb_id;          /* id this quota applies to */
+       __le32 dqb_ihardlimit;  /* absolute limit on allocated inodes */
+       __le32 dqb_isoftlimit;  /* preferred inode limit */
+       __le32 dqb_curinodes;   /* current # allocated inodes */
+       __le32 dqb_bhardlimit;  /* absolute limit on disk space (in QUOTABLOCK_SIZE) */
+       __le32 dqb_bsoftlimit;  /* preferred limit on disk space (in QUOTABLOCK_SIZE) */
+       __le64 dqb_curspace;    /* current space occupied (in bytes) */
+       __le64 dqb_btime;       /* time limit for excessive disk use */
+       __le64 dqb_itime;       /* time limit for excessive inode use */
+};
+
+/* Header with type and version specific information */
+struct v2_disk_dqinfo {
+       __le32 dqi_bgrace;      /* Time before block soft limit becomes hard limit */
+       __le32 dqi_igrace;      /* Time before inode soft limit becomes hard limit */
+       __le32 dqi_flags;       /* Flags for quotafile (DQF_*) */
+       __le32 dqi_blocks;      /* Number of blocks in file */
+       __le32 dqi_free_blk;    /* Number of first free block in the list */
+       __le32 dqi_free_entry;  /* Number of block with at least one free entry */
+};
+
+#define V2_DQINFOOFF   sizeof(struct v2_disk_dqheader) /* Offset of info header in file */
+#define V2_DQBLKSIZE_BITS 10                           /* Size of leaf block in tree */
+
+#endif /* _LINUX_QUOTAIO_V2_H */
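
Putting the definitions above together, a vfsv0 file starts with the 8-byte
generic header, followed immediately by the per-type info block at
V2_DQINFOOFF, with the root tree block at block QT_TREEOFF (byte
1 << V2_DQBLKSIZE_BITS). A standalone sketch, using plain fixed-width types in
place of __le32:

/* Userspace sketch of the vfsv0 file header layout. */
#include <stdio.h>
#include <stdint.h>

struct v2_disk_dqheader {
        uint32_t dqh_magic;
        uint32_t dqh_version;
};                              /* 8 bytes */

struct v2_disk_dqinfo {
        uint32_t dqi_bgrace;
        uint32_t dqi_igrace;
        uint32_t dqi_flags;
        uint32_t dqi_blocks;
        uint32_t dqi_free_blk;
        uint32_t dqi_free_entry;
};                              /* 24 bytes */

#define V2_DQBLKSIZE_BITS 10
#define QT_TREEOFF 1

int main(void)
{
        printf("dqheader  @ byte 0, %zu bytes\n",
               sizeof(struct v2_disk_dqheader));
        printf("dqinfo    @ byte %zu, %zu bytes\n",
               sizeof(struct v2_disk_dqheader),        /* V2_DQINFOOFF */
               sizeof(struct v2_disk_dqinfo));
        printf("tree root @ byte %d (block QT_TREEOFF)\n",
               QT_TREEOFF << V2_DQBLKSIZE_BITS);
        return 0;
}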
diff --git a/fs/quota_tree.c b/fs/quota_tree.c
deleted file mode 100644 (file)
index 953404c..0000000
+++ /dev/null
@@ -1,645 +0,0 @@
-/*
- *     vfsv0 quota IO operations on file
- */
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/dqblk_v2.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/quotaops.h>
-
-#include <asm/byteorder.h>
-
-#include "quota_tree.h"
-
-MODULE_AUTHOR("Jan Kara");
-MODULE_DESCRIPTION("Quota trie support");
-MODULE_LICENSE("GPL");
-
-#define __QUOTA_QT_PARANOIA
-
-typedef char *dqbuf_t;
-
-static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
-{
-       unsigned int epb = info->dqi_usable_bs >> 2;
-
-       depth = info->dqi_qtree_depth - depth - 1;
-       while (depth--)
-               id /= epb;
-       return id % epb;
-}
-
-/* Number of entries in one blocks */
-static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
-{
-       return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
-              / info->dqi_entry_size;
-}
-
-static dqbuf_t getdqbuf(size_t size)
-{
-       dqbuf_t buf = kmalloc(size, GFP_NOFS);
-       if (!buf)
-               printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
-       return buf;
-}
-
-static inline void freedqbuf(dqbuf_t buf)
-{
-       kfree(buf);
-}
-
-static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
-{
-       struct super_block *sb = info->dqi_sb;
-
-       memset(buf, 0, info->dqi_usable_bs);
-       return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf,
-              info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
-}
-
-static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
-{
-       struct super_block *sb = info->dqi_sb;
-
-       return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf,
-              info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
-}
-
-/* Remove empty block from list and return it */
-static int get_free_dqblk(struct qtree_mem_dqinfo *info)
-{
-       dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
-       struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
-       int ret, blk;
-
-       if (!buf)
-               return -ENOMEM;
-       if (info->dqi_free_blk) {
-               blk = info->dqi_free_blk;
-               ret = read_blk(info, blk, buf);
-               if (ret < 0)
-                       goto out_buf;
-               info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
-       }
-       else {
-               memset(buf, 0, info->dqi_usable_bs);
-               /* Assure block allocation... */
-               ret = write_blk(info, info->dqi_blocks, buf);
-               if (ret < 0)
-                       goto out_buf;
-               blk = info->dqi_blocks++;
-       }
-       mark_info_dirty(info->dqi_sb, info->dqi_type);
-       ret = blk;
-out_buf:
-       freedqbuf(buf);
-       return ret;
-}
-
-/* Insert empty block to the list */
-static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
-{
-       struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
-       int err;
-
-       dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
-       dh->dqdh_prev_free = cpu_to_le32(0);
-       dh->dqdh_entries = cpu_to_le16(0);
-       err = write_blk(info, blk, buf);
-       if (err < 0)
-               return err;
-       info->dqi_free_blk = blk;
-       mark_info_dirty(info->dqi_sb, info->dqi_type);
-       return 0;
-}
-
-/* Remove given block from the list of blocks with free entries */
-static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
-{
-       dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
-       struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
-       uint nextblk = le32_to_cpu(dh->dqdh_next_free);
-       uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
-       int err;
-
-       if (!tmpbuf)
-               return -ENOMEM;
-       if (nextblk) {
-               err = read_blk(info, nextblk, tmpbuf);
-               if (err < 0)
-                       goto out_buf;
-               ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
-                                                       dh->dqdh_prev_free;
-               err = write_blk(info, nextblk, tmpbuf);
-               if (err < 0)
-                       goto out_buf;
-       }
-       if (prevblk) {
-               err = read_blk(info, prevblk, tmpbuf);
-               if (err < 0)
-                       goto out_buf;
-               ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
-                                                       dh->dqdh_next_free;
-               err = write_blk(info, prevblk, tmpbuf);
-               if (err < 0)
-                       goto out_buf;
-       } else {
-               info->dqi_free_entry = nextblk;
-               mark_info_dirty(info->dqi_sb, info->dqi_type);
-       }
-       freedqbuf(tmpbuf);
-       dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
-       /* No matter whether write succeeds block is out of list */
-       if (write_blk(info, blk, buf) < 0)
-               printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk);
-       return 0;
-out_buf:
-       freedqbuf(tmpbuf);
-       return err;
-}
-
-/* Insert given block to the beginning of list with free entries */
-static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
-{
-       dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
-       struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
-       int err;
-
-       if (!tmpbuf)
-               return -ENOMEM;
-       dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
-       dh->dqdh_prev_free = cpu_to_le32(0);
-       err = write_blk(info, blk, buf);
-       if (err < 0)
-               goto out_buf;
-       if (info->dqi_free_entry) {
-               err = read_blk(info, info->dqi_free_entry, tmpbuf);
-               if (err < 0)
-                       goto out_buf;
-               ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
-                                                       cpu_to_le32(blk);
-               err = write_blk(info, info->dqi_free_entry, tmpbuf);
-               if (err < 0)
-                       goto out_buf;
-       }
-       freedqbuf(tmpbuf);
-       info->dqi_free_entry = blk;
-       mark_info_dirty(info->dqi_sb, info->dqi_type);
-       return 0;
-out_buf:
-       freedqbuf(tmpbuf);
-       return err;
-}
-
-/* Is the entry in the block free? */
-int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
-{
-       int i;
-
-       for (i = 0; i < info->dqi_entry_size; i++)
-               if (disk[i])
-                       return 0;
-       return 1;
-}
-EXPORT_SYMBOL(qtree_entry_unused);
-
-/* Find space for dquot */
-static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
-                             struct dquot *dquot, int *err)
-{
-       uint blk, i;
-       struct qt_disk_dqdbheader *dh;
-       dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
-       char *ddquot;
-
-       *err = 0;
-       if (!buf) {
-               *err = -ENOMEM;
-               return 0;
-       }
-       dh = (struct qt_disk_dqdbheader *)buf;
-       if (info->dqi_free_entry) {
-               blk = info->dqi_free_entry;
-               *err = read_blk(info, blk, buf);
-               if (*err < 0)
-                       goto out_buf;
-       } else {
-               blk = get_free_dqblk(info);
-               if ((int)blk < 0) {
-                       *err = blk;
-                       freedqbuf(buf);
-                       return 0;
-               }
-               memset(buf, 0, info->dqi_usable_bs);
-               /* This is enough as block is already zeroed and entry list is empty... */
-               info->dqi_free_entry = blk;
-               mark_info_dirty(dquot->dq_sb, dquot->dq_type);
-       }
-       /* Block will be full? */
-       if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
-               *err = remove_free_dqentry(info, buf, blk);
-               if (*err < 0) {
-                       printk(KERN_ERR "VFS: find_free_dqentry(): Can't "
-                              "remove block (%u) from entry free list.\n",
-                              blk);
-                       goto out_buf;
-               }
-       }
-       le16_add_cpu(&dh->dqdh_entries, 1);
-       /* Find free structure in block */
-       for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
-            i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot);
-            i++, ddquot += info->dqi_entry_size);
-#ifdef __QUOTA_QT_PARANOIA
-       if (i == qtree_dqstr_in_blk(info)) {
-               printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
-                               "but it shouldn't.\n");
-               *err = -EIO;
-               goto out_buf;
-       }
-#endif
-       *err = write_blk(info, blk, buf);
-       if (*err < 0) {
-               printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
-                               "data block %u.\n", blk);
-               goto out_buf;
-       }
-       dquot->dq_off = (blk << info->dqi_blocksize_bits) +
-                       sizeof(struct qt_disk_dqdbheader) +
-                       i * info->dqi_entry_size;
-       freedqbuf(buf);
-       return blk;
-out_buf:
-       freedqbuf(buf);
-       return 0;
-}
-
-/* Insert reference to structure into the trie */
-static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
-                         uint *treeblk, int depth)
-{
-       dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
-       int ret = 0, newson = 0, newact = 0;
-       __le32 *ref;
-       uint newblk;
-
-       if (!buf)
-               return -ENOMEM;
-       if (!*treeblk) {
-               ret = get_free_dqblk(info);
-               if (ret < 0)
-                       goto out_buf;
-               *treeblk = ret;
-               memset(buf, 0, info->dqi_usable_bs);
-               newact = 1;
-       } else {
-               ret = read_blk(info, *treeblk, buf);
-               if (ret < 0) {
-                       printk(KERN_ERR "VFS: Can't read tree quota block "
-                                       "%u.\n", *treeblk);
-                       goto out_buf;
-               }
-       }
-       ref = (__le32 *)buf;
-       newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
-       if (!newblk)
-               newson = 1;
-       if (depth == info->dqi_qtree_depth - 1) {
-#ifdef __QUOTA_QT_PARANOIA
-               if (newblk) {
-                       printk(KERN_ERR "VFS: Inserting already present quota "
-                                       "entry (block %u).\n",
-                              le32_to_cpu(ref[get_index(info,
-                                               dquot->dq_id, depth)]));
-                       ret = -EIO;
-                       goto out_buf;
-               }
-#endif
-               newblk = find_free_dqentry(info, dquot, &ret);
-       } else {
-               ret = do_insert_tree(info, dquot, &newblk, depth+1);
-       }
-       if (newson && ret >= 0) {
-               ref[get_index(info, dquot->dq_id, depth)] =
-                                                       cpu_to_le32(newblk);
-               ret = write_blk(info, *treeblk, buf);
-       } else if (newact && ret < 0) {
-               put_free_dqblk(info, buf, *treeblk);
-       }
-out_buf:
-       freedqbuf(buf);
-       return ret;
-}
-
-/* Wrapper for inserting quota structure into tree */
-static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
-                                struct dquot *dquot)
-{
-       int tmp = QT_TREEOFF;
-       return do_insert_tree(info, dquot, &tmp, 0);
-}
-
-/*
- *     We don't have to be afraid of deadlocks as we never have quotas on quota files...
- */
-int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
-{
-       int type = dquot->dq_type;
-       struct super_block *sb = dquot->dq_sb;
-       ssize_t ret;
-       dqbuf_t ddquot = getdqbuf(info->dqi_entry_size);
-
-       if (!ddquot)
-               return -ENOMEM;
-
-       /* dq_off is guarded by dqio_mutex */
-       if (!dquot->dq_off) {
-               ret = dq_insert_tree(info, dquot);
-               if (ret < 0) {
-                       printk(KERN_ERR "VFS: Error %zd occurred while "
-                                       "creating quota.\n", ret);
-                       freedqbuf(ddquot);
-                       return ret;
-               }
-       }
-       spin_lock(&dq_data_lock);
-       info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
-       spin_unlock(&dq_data_lock);
-       ret = sb->s_op->quota_write(sb, type, (char *)ddquot,
-                                       info->dqi_entry_size, dquot->dq_off);
-       if (ret != info->dqi_entry_size) {
-               printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
-                      sb->s_id);
-               if (ret >= 0)
-                       ret = -ENOSPC;
-       } else {
-               ret = 0;
-       }
-       dqstats.writes++;
-       freedqbuf(ddquot);
-
-       return ret;
-}
-EXPORT_SYMBOL(qtree_write_dquot);
-
-/* Free dquot entry in data block */
-static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
-                       uint blk)
-{
-       struct qt_disk_dqdbheader *dh;
-       dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
-       int ret = 0;
-
-       if (!buf)
-               return -ENOMEM;
-       if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
-               printk(KERN_ERR "VFS: Quota structure has offset to other "
-                 "block (%u) than it should (%u).\n", blk,
-                 (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
-               goto out_buf;
-       }
-       ret = read_blk(info, blk, buf);
-       if (ret < 0) {
-               printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
-               goto out_buf;
-       }
-       dh = (struct qt_disk_dqdbheader *)buf;
-       le16_add_cpu(&dh->dqdh_entries, -1);
-       if (!le16_to_cpu(dh->dqdh_entries)) {   /* Block got free? */
-               ret = remove_free_dqentry(info, buf, blk);
-               if (ret >= 0)
-                       ret = put_free_dqblk(info, buf, blk);
-               if (ret < 0) {
-                       printk(KERN_ERR "VFS: Can't move quota data block (%u) "
-                         "to free list.\n", blk);
-                       goto out_buf;
-               }
-       } else {
-               memset(buf +
-                      (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
-                      0, info->dqi_entry_size);
-               if (le16_to_cpu(dh->dqdh_entries) ==
-                   qtree_dqstr_in_blk(info) - 1) {
-                       /* Insert will write block itself */
-                       ret = insert_free_dqentry(info, buf, blk);
-                       if (ret < 0) {
-                               printk(KERN_ERR "VFS: Can't insert quota data "
-                                      "block (%u) to free entry list.\n", blk);
-                               goto out_buf;
-                       }
-               } else {
-                       ret = write_blk(info, blk, buf);
-                       if (ret < 0) {
-                               printk(KERN_ERR "VFS: Can't write quota data "
-                                 "block %u\n", blk);
-                               goto out_buf;
-                       }
-               }
-       }
-       dquot->dq_off = 0;      /* Quota is now unattached */
-out_buf:
-       freedqbuf(buf);
-       return ret;
-}
-
-/* Remove reference to dquot from tree */
-static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
-                      uint *blk, int depth)
-{
-       dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
-       int ret = 0;
-       uint newblk;
-       __le32 *ref = (__le32 *)buf;
-
-       if (!buf)
-               return -ENOMEM;
-       ret = read_blk(info, *blk, buf);
-       if (ret < 0) {
-               printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
-               goto out_buf;
-       }
-       newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
-       if (depth == info->dqi_qtree_depth - 1) {
-               ret = free_dqentry(info, dquot, newblk);
-               newblk = 0;
-       } else {
-               ret = remove_tree(info, dquot, &newblk, depth+1);
-       }
-       if (ret >= 0 && !newblk) {
-               int i;
-               ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
-               /* Block got empty? */
-               for (i = 0;
-                    i < (info->dqi_usable_bs >> 2) && !ref[i];
-                    i++);
-               /* Don't put the root block into the free block list */
-               if (i == (info->dqi_usable_bs >> 2)
-                   && *blk != QT_TREEOFF) {
-                       put_free_dqblk(info, buf, *blk);
-                       *blk = 0;
-               } else {
-                       ret = write_blk(info, *blk, buf);
-                       if (ret < 0)
-                               printk(KERN_ERR "VFS: Can't write quota tree "
-                                 "block %u.\n", *blk);
-               }
-       }
-out_buf:
-       freedqbuf(buf);
-       return ret;
-}
-
-/* Delete dquot from tree */
-int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
-{
-       uint tmp = QT_TREEOFF;
-
-       if (!dquot->dq_off)     /* Even not allocated? */
-               return 0;
-       return remove_tree(info, dquot, &tmp, 0);
-}
-EXPORT_SYMBOL(qtree_delete_dquot);
-
-/* Find entry in block */
-static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
-                                struct dquot *dquot, uint blk)
-{
-       dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
-       loff_t ret = 0;
-       int i;
-       char *ddquot;
-
-       if (!buf)
-               return -ENOMEM;
-       ret = read_blk(info, blk, buf);
-       if (ret < 0) {
-               printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
-               goto out_buf;
-       }
-       for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
-            i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot);
-            i++, ddquot += info->dqi_entry_size);
-       if (i == qtree_dqstr_in_blk(info)) {
-               printk(KERN_ERR "VFS: Quota for id %u referenced "
-                 "but not present.\n", dquot->dq_id);
-               ret = -EIO;
-               goto out_buf;
-       } else {
-               ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
-                 qt_disk_dqdbheader) + i * info->dqi_entry_size;
-       }
-out_buf:
-       freedqbuf(buf);
-       return ret;
-}
-
-/* Find entry for given id in the tree */
-static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
-                               struct dquot *dquot, uint blk, int depth)
-{
-       dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
-       loff_t ret = 0;
-       __le32 *ref = (__le32 *)buf;
-
-       if (!buf)
-               return -ENOMEM;
-       ret = read_blk(info, blk, buf);
-       if (ret < 0) {
-               printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
-               goto out_buf;
-       }
-       ret = 0;
-       blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
-       if (!blk)       /* No reference? */
-               goto out_buf;
-       if (depth < info->dqi_qtree_depth - 1)
-               ret = find_tree_dqentry(info, dquot, blk, depth+1);
-       else
-               ret = find_block_dqentry(info, dquot, blk);
-out_buf:
-       freedqbuf(buf);
-       return ret;
-}
-
-/* Find entry for given id in the tree - wrapper function */
-static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
-                                 struct dquot *dquot)
-{
-       return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
-}
-
-int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
-{
-       int type = dquot->dq_type;
-       struct super_block *sb = dquot->dq_sb;
-       loff_t offset;
-       dqbuf_t ddquot;
-       int ret = 0;
-
-#ifdef __QUOTA_QT_PARANOIA
-       /* Invalidated quota? */
-       if (!sb_dqopt(dquot->dq_sb)->files[type]) {
-               printk(KERN_ERR "VFS: Quota invalidated while reading!\n");
-               return -EIO;
-       }
-#endif
-       /* Do we know offset of the dquot entry in the quota file? */
-       if (!dquot->dq_off) {
-               offset = find_dqentry(info, dquot);
-               if (offset <= 0) {      /* Entry not present? */
-                       if (offset < 0)
-                               printk(KERN_ERR "VFS: Can't read quota "
-                                 "structure for id %u.\n", dquot->dq_id);
-                       dquot->dq_off = 0;
-                       set_bit(DQ_FAKE_B, &dquot->dq_flags);
-                       memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
-                       ret = offset;
-                       goto out;
-               }
-               dquot->dq_off = offset;
-       }
-       ddquot = getdqbuf(info->dqi_entry_size);
-       if (!ddquot)
-               return -ENOMEM;
-       ret = sb->s_op->quota_read(sb, type, (char *)ddquot,
-                                  info->dqi_entry_size, dquot->dq_off);
-       if (ret != info->dqi_entry_size) {
-               if (ret >= 0)
-                       ret = -EIO;
-               printk(KERN_ERR "VFS: Error while reading quota "
-                               "structure for id %u.\n", dquot->dq_id);
-               set_bit(DQ_FAKE_B, &dquot->dq_flags);
-               memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
-               freedqbuf(ddquot);
-               goto out;
-       }
-       spin_lock(&dq_data_lock);
-       info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
-       if (!dquot->dq_dqb.dqb_bhardlimit &&
-           !dquot->dq_dqb.dqb_bsoftlimit &&
-           !dquot->dq_dqb.dqb_ihardlimit &&
-           !dquot->dq_dqb.dqb_isoftlimit)
-               set_bit(DQ_FAKE_B, &dquot->dq_flags);
-       spin_unlock(&dq_data_lock);
-       freedqbuf(ddquot);
-out:
-       dqstats.reads++;
-       return ret;
-}
-EXPORT_SYMBOL(qtree_read_dquot);
-
-/* Check whether dquot should not be deleted. We know we are
- * the only one operating on dquot (thanks to dq_lock) */
-int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
-{
-       if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
-               return qtree_delete_dquot(info, dquot);
-       return 0;
-}
-EXPORT_SYMBOL(qtree_release_dquot);
diff --git a/fs/quota_tree.h b/fs/quota_tree.h
deleted file mode 100644 (file)
index a1ab8db..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- *     Definitions of structures for vfsv0 quota format
- */
-
-#ifndef _LINUX_QUOTA_TREE_H
-#define _LINUX_QUOTA_TREE_H
-
-#include <linux/types.h>
-#include <linux/quota.h>
-
-/*
- *  Structure of header of block with quota structures. It is padded to 16 bytes so
- *  there will be space for exactly 21 quota-entries in a block
- */
-struct qt_disk_dqdbheader {
-       __le32 dqdh_next_free;  /* Number of next block with free entry */
-       __le32 dqdh_prev_free;  /* Number of previous block with free entry */
-       __le16 dqdh_entries;    /* Number of valid entries in block */
-       __le16 dqdh_pad1;
-       __le32 dqdh_pad2;
-};
-
-#define QT_TREEOFF     1               /* Offset of tree in file in blocks */
-
-#endif /* _LINUX_QUOTAIO_TREE_H */
diff --git a/fs/quota_v1.c b/fs/quota_v1.c
deleted file mode 100644 (file)
index b4af1c6..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/quota.h>
-#include <linux/quotaops.h>
-#include <linux/dqblk_v1.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/byteorder.h>
-
-#include "quotaio_v1.h"
-
-MODULE_AUTHOR("Jan Kara");
-MODULE_DESCRIPTION("Old quota format support");
-MODULE_LICENSE("GPL");
-
-#define QUOTABLOCK_BITS 10
-#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
-
-static inline qsize_t v1_stoqb(qsize_t space)
-{
-       return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
-}
-
-static inline qsize_t v1_qbtos(qsize_t blocks)
-{
-       return blocks << QUOTABLOCK_BITS;
-}
-
-static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d)
-{
-       m->dqb_ihardlimit = d->dqb_ihardlimit;
-       m->dqb_isoftlimit = d->dqb_isoftlimit;
-       m->dqb_curinodes = d->dqb_curinodes;
-       m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit);
-       m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit);
-       m->dqb_curspace = v1_qbtos(d->dqb_curblocks);
-       m->dqb_itime = d->dqb_itime;
-       m->dqb_btime = d->dqb_btime;
-}
-
-static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m)
-{
-       d->dqb_ihardlimit = m->dqb_ihardlimit;
-       d->dqb_isoftlimit = m->dqb_isoftlimit;
-       d->dqb_curinodes = m->dqb_curinodes;
-       d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit);
-       d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit);
-       d->dqb_curblocks = v1_stoqb(m->dqb_curspace);
-       d->dqb_itime = m->dqb_itime;
-       d->dqb_btime = m->dqb_btime;
-}
-
-static int v1_read_dqblk(struct dquot *dquot)
-{
-       int type = dquot->dq_type;
-       struct v1_disk_dqblk dqblk;
-
-       if (!sb_dqopt(dquot->dq_sb)->files[type])
-               return -EINVAL;
-
-       /* Set structure to 0s in case read fails/is after end of file */
-       memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
-       dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
-
-       v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
-       if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 &&
-           dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0)
-               set_bit(DQ_FAKE_B, &dquot->dq_flags);
-       dqstats.reads++;
-
-       return 0;
-}
-
-static int v1_commit_dqblk(struct dquot *dquot)
-{
-       short type = dquot->dq_type;
-       ssize_t ret;
-       struct v1_disk_dqblk dqblk;
-
-       v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
-       if (dquot->dq_id == 0) {
-               dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
-               dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
-       }
-       ret = 0;
-       if (sb_dqopt(dquot->dq_sb)->files[type])
-               ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk,
-                                       sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
-       if (ret != sizeof(struct v1_disk_dqblk)) {
-               printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
-                       dquot->dq_sb->s_id);
-               if (ret >= 0)
-                       ret = -EIO;
-               goto out;
-       }
-       ret = 0;
-
-out:
-       dqstats.writes++;
-
-       return ret;
-}
-
-/* Magics of new quota format */
-#define V2_INITQMAGICS {\
-       0xd9c01f11,     /* USRQUOTA */\
-       0xd9c01927      /* GRPQUOTA */\
-}
-
-/* Header of new quota format */
-struct v2_disk_dqheader {
-       __le32 dqh_magic;        /* Magic number identifying file */
-       __le32 dqh_version;      /* File version */
-};
-
-static int v1_check_quota_file(struct super_block *sb, int type)
-{
-       struct inode *inode = sb_dqopt(sb)->files[type];
-       ulong blocks;
-       size_t off; 
-       struct v2_disk_dqheader dqhead;
-       ssize_t size;
-       loff_t isize;
-       static const uint quota_magics[] = V2_INITQMAGICS;
-
-       isize = i_size_read(inode);
-       if (!isize)
-               return 0;
-       blocks = isize >> BLOCK_SIZE_BITS;
-       off = isize & (BLOCK_SIZE - 1);
-       if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk))
-               return 0;
-       /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */
-       size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
-       if (size != sizeof(struct v2_disk_dqheader))
-               return 1;       /* Probably not new format */
-       if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
-               return 1;       /* Definitely not new format */
-       printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id);
-        return 0;              /* Seems like a new format file -> refuse it */
-}
-
-static int v1_read_file_info(struct super_block *sb, int type)
-{
-       struct quota_info *dqopt = sb_dqopt(sb);
-       struct v1_disk_dqblk dqblk;
-       int ret;
-
-       if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
-               if (ret >= 0)
-                       ret = -EIO;
-               goto out;
-       }
-       ret = 0;
-       /* limits are stored as unsigned 32-bit data */
-       dqopt->info[type].dqi_maxblimit = 0xffffffff;
-       dqopt->info[type].dqi_maxilimit = 0xffffffff;
-       dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
-       dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
-out:
-       return ret;
-}
-
-static int v1_write_file_info(struct super_block *sb, int type)
-{
-       struct quota_info *dqopt = sb_dqopt(sb);
-       struct v1_disk_dqblk dqblk;
-       int ret;
-
-       dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
-       if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
-           sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
-               if (ret >= 0)
-                       ret = -EIO;
-               goto out;
-       }
-       dqblk.dqb_itime = dqopt->info[type].dqi_igrace;
-       dqblk.dqb_btime = dqopt->info[type].dqi_bgrace;
-       ret = sb->s_op->quota_write(sb, type, (char *)&dqblk,
-             sizeof(struct v1_disk_dqblk), v1_dqoff(0));
-       if (ret == sizeof(struct v1_disk_dqblk))
-               ret = 0;
-       else if (ret > 0)
-               ret = -EIO;
-out:
-       return ret;
-}
-
-static struct quota_format_ops v1_format_ops = {
-       .check_quota_file       = v1_check_quota_file,
-       .read_file_info         = v1_read_file_info,
-       .write_file_info        = v1_write_file_info,
-       .free_file_info         = NULL,
-       .read_dqblk             = v1_read_dqblk,
-       .commit_dqblk           = v1_commit_dqblk,
-};
-
-static struct quota_format_type v1_quota_format = {
-       .qf_fmt_id      = QFMT_VFS_OLD,
-       .qf_ops         = &v1_format_ops,
-       .qf_owner       = THIS_MODULE
-};
-
-static int __init init_v1_quota_format(void)
-{
-        return register_quota_format(&v1_quota_format);
-}
-
-static void __exit exit_v1_quota_format(void)
-{
-        unregister_quota_format(&v1_quota_format);
-}
-
-module_init(init_v1_quota_format);
-module_exit(exit_v1_quota_format);
-
diff --git a/fs/quota_v2.c b/fs/quota_v2.c
deleted file mode 100644 (file)
index b618b56..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- *     vfsv0 quota IO operations on file
- */
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/dqblk_v2.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/quotaops.h>
-
-#include <asm/byteorder.h>
-
-#include "quota_tree.h"
-#include "quotaio_v2.h"
-
-MODULE_AUTHOR("Jan Kara");
-MODULE_DESCRIPTION("Quota format v2 support");
-MODULE_LICENSE("GPL");
-
-#define __QUOTA_V2_PARANOIA
-
-static void v2_mem2diskdqb(void *dp, struct dquot *dquot);
-static void v2_disk2memdqb(struct dquot *dquot, void *dp);
-static int v2_is_id(void *dp, struct dquot *dquot);
-
-static struct qtree_fmt_operations v2_qtree_ops = {
-       .mem2disk_dqblk = v2_mem2diskdqb,
-       .disk2mem_dqblk = v2_disk2memdqb,
-       .is_id = v2_is_id,
-};
-
-#define QUOTABLOCK_BITS 10
-#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
-
-static inline qsize_t v2_stoqb(qsize_t space)
-{
-       return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
-}
-
-static inline qsize_t v2_qbtos(qsize_t blocks)
-{
-       return blocks << QUOTABLOCK_BITS;
-}
-
-/* Check whether given file is really vfsv0 quotafile */
-static int v2_check_quota_file(struct super_block *sb, int type)
-{
-       struct v2_disk_dqheader dqhead;
-       ssize_t size;
-       static const uint quota_magics[] = V2_INITQMAGICS;
-       static const uint quota_versions[] = V2_INITQVERSIONS;
-       size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
-       if (size != sizeof(struct v2_disk_dqheader)) {
-               printk("quota_v2: failed read expected=%zd got=%zd\n",
-                       sizeof(struct v2_disk_dqheader), size);
-               return 0;
-       }
-       if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] ||
-           le32_to_cpu(dqhead.dqh_version) != quota_versions[type])
-               return 0;
-       return 1;
-}
-
-/* Read information header from quota file */
-static int v2_read_file_info(struct super_block *sb, int type)
-{
-       struct v2_disk_dqinfo dinfo;
-       struct mem_dqinfo *info = sb_dqinfo(sb, type);
-       struct qtree_mem_dqinfo *qinfo;
-       ssize_t size;
-
-       size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
-              sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
-       if (size != sizeof(struct v2_disk_dqinfo)) {
-               printk(KERN_WARNING "Can't read info structure on device %s.\n",
-                       sb->s_id);
-               return -1;
-       }
-       info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS);
-       if (!info->dqi_priv) {
-               printk(KERN_WARNING
-                      "Not enough memory for quota information structure.\n");
-               return -1;
-       }
-       qinfo = info->dqi_priv;
-       /* limits are stored as unsigned 32-bit data */
-       info->dqi_maxblimit = 0xffffffff;
-       info->dqi_maxilimit = 0xffffffff;
-       info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
-       info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
-       info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
-       qinfo->dqi_sb = sb;
-       qinfo->dqi_type = type;
-       qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
-       qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
-       qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
-       qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS;
-       qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS;
-       qinfo->dqi_qtree_depth = qtree_depth(qinfo);
-       qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk);
-       qinfo->dqi_ops = &v2_qtree_ops;
-       return 0;
-}
-
-/* Write information header to quota file */
-static int v2_write_file_info(struct super_block *sb, int type)
-{
-       struct v2_disk_dqinfo dinfo;
-       struct mem_dqinfo *info = sb_dqinfo(sb, type);
-       struct qtree_mem_dqinfo *qinfo = info->dqi_priv;
-       ssize_t size;
-
-       spin_lock(&dq_data_lock);
-       info->dqi_flags &= ~DQF_INFO_DIRTY;
-       dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
-       dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
-       dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
-       spin_unlock(&dq_data_lock);
-       dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
-       dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
-       dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry);
-       size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
-              sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
-       if (size != sizeof(struct v2_disk_dqinfo)) {
-               printk(KERN_WARNING "Can't write info structure on device %s.\n",
-                       sb->s_id);
-               return -1;
-       }
-       return 0;
-}
-
-static void v2_disk2memdqb(struct dquot *dquot, void *dp)
-{
-       struct v2_disk_dqblk *d = dp, empty;
-       struct mem_dqblk *m = &dquot->dq_dqb;
-
-       m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit);
-       m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit);
-       m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes);
-       m->dqb_itime = le64_to_cpu(d->dqb_itime);
-       m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit));
-       m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit));
-       m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
-       m->dqb_btime = le64_to_cpu(d->dqb_btime);
-       /* We need to escape back all-zero structure */
-       memset(&empty, 0, sizeof(struct v2_disk_dqblk));
-       empty.dqb_itime = cpu_to_le64(1);
-       if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk)))
-               m->dqb_itime = 0;
-}
-
-static void v2_mem2diskdqb(void *dp, struct dquot *dquot)
-{
-       struct v2_disk_dqblk *d = dp;
-       struct mem_dqblk *m = &dquot->dq_dqb;
-       struct qtree_mem_dqinfo *info =
-                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
-
-       d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit);
-       d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit);
-       d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes);
-       d->dqb_itime = cpu_to_le64(m->dqb_itime);
-       d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit));
-       d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit));
-       d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
-       d->dqb_btime = cpu_to_le64(m->dqb_btime);
-       d->dqb_id = cpu_to_le32(dquot->dq_id);
-       if (qtree_entry_unused(info, dp))
-               d->dqb_itime = cpu_to_le64(1);
-}
-
-static int v2_is_id(void *dp, struct dquot *dquot)
-{
-       struct v2_disk_dqblk *d = dp;
-       struct qtree_mem_dqinfo *info =
-                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
-
-       if (qtree_entry_unused(info, dp))
-               return 0;
-       return le32_to_cpu(d->dqb_id) == dquot->dq_id;
-}
-
-static int v2_read_dquot(struct dquot *dquot)
-{
-       return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
-}
-
-static int v2_write_dquot(struct dquot *dquot)
-{
-       return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
-}
-
-static int v2_release_dquot(struct dquot *dquot)
-{
-       return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
-}
-
-static int v2_free_file_info(struct super_block *sb, int type)
-{
-       kfree(sb_dqinfo(sb, type)->dqi_priv);
-       return 0;
-}
-
-static struct quota_format_ops v2_format_ops = {
-       .check_quota_file       = v2_check_quota_file,
-       .read_file_info         = v2_read_file_info,
-       .write_file_info        = v2_write_file_info,
-       .free_file_info         = v2_free_file_info,
-       .read_dqblk             = v2_read_dquot,
-       .commit_dqblk           = v2_write_dquot,
-       .release_dqblk          = v2_release_dquot,
-};
-
-static struct quota_format_type v2_quota_format = {
-       .qf_fmt_id      = QFMT_VFS_V0,
-       .qf_ops         = &v2_format_ops,
-       .qf_owner       = THIS_MODULE
-};
-
-static int __init init_v2_quota_format(void)
-{
-       return register_quota_format(&v2_quota_format);
-}
-
-static void __exit exit_v2_quota_format(void)
-{
-       unregister_quota_format(&v2_quota_format);
-}
-
-module_init(init_v2_quota_format);
-module_exit(exit_v2_quota_format);
diff --git a/fs/quotaio_v1.h b/fs/quotaio_v1.h
deleted file mode 100644 (file)
index 746654b..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _LINUX_QUOTAIO_V1_H
-#define _LINUX_QUOTAIO_V1_H
-
-#include <linux/types.h>
-
-/*
- * The following constants define the amount of time given a user
- * before the soft limits are treated as hard limits (usually resulting
- * in an allocation failure). The timer is started when the user crosses
- * their soft limit, it is reset when they go below their soft limit.
- */
-#define MAX_IQ_TIME  604800    /* (7*24*60*60) 1 week */
-#define MAX_DQ_TIME  604800    /* (7*24*60*60) 1 week */
-
-/*
- * The following structure defines the format of the disk quota file
- * (as it appears on disk) - the file is an array of these structures
- * indexed by user or group number.
- */
-struct v1_disk_dqblk {
-       __u32 dqb_bhardlimit;   /* absolute limit on disk blks alloc */
-       __u32 dqb_bsoftlimit;   /* preferred limit on disk blks */
-       __u32 dqb_curblocks;    /* current block count */
-       __u32 dqb_ihardlimit;   /* absolute limit on allocated inodes */
-       __u32 dqb_isoftlimit;   /* preferred inode limit */
-       __u32 dqb_curinodes;    /* current # allocated inodes */
-       time_t dqb_btime;       /* time limit for excessive disk use */
-       time_t dqb_itime;       /* time limit for excessive inode use */
-};
-
-#define v1_dqoff(UID)      ((loff_t)((UID) * sizeof (struct v1_disk_dqblk)))
-
-#endif /* _LINUX_QUOTAIO_V1_H */
diff --git a/fs/quotaio_v2.h b/fs/quotaio_v2.h
deleted file mode 100644 (file)
index 530fe58..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *     Definitions of structures for vfsv0 quota format
- */
-
-#ifndef _LINUX_QUOTAIO_V2_H
-#define _LINUX_QUOTAIO_V2_H
-
-#include <linux/types.h>
-#include <linux/quota.h>
-
-/*
- * Definitions of magics and versions of current quota files
- */
-#define V2_INITQMAGICS {\
-       0xd9c01f11,     /* USRQUOTA */\
-       0xd9c01927      /* GRPQUOTA */\
-}
-
-#define V2_INITQVERSIONS {\
-       0,              /* USRQUOTA */\
-       0               /* GRPQUOTA */\
-}
-
-/* First generic header */
-struct v2_disk_dqheader {
-       __le32 dqh_magic;       /* Magic number identifying file */
-       __le32 dqh_version;     /* File version */
-};
-
-/*
- * The following structure defines the format of the disk quota file
- * (as it appears on disk) - the file is a radix tree whose leaves point
- * to blocks of these structures.
- */
-struct v2_disk_dqblk {
-       __le32 dqb_id;          /* id this quota applies to */
-       __le32 dqb_ihardlimit;  /* absolute limit on allocated inodes */
-       __le32 dqb_isoftlimit;  /* preferred inode limit */
-       __le32 dqb_curinodes;   /* current # allocated inodes */
-       __le32 dqb_bhardlimit;  /* absolute limit on disk space (in QUOTABLOCK_SIZE) */
-       __le32 dqb_bsoftlimit;  /* preferred limit on disk space (in QUOTABLOCK_SIZE) */
-       __le64 dqb_curspace;    /* current space occupied (in bytes) */
-       __le64 dqb_btime;       /* time limit for excessive disk use */
-       __le64 dqb_itime;       /* time limit for excessive inode use */
-};
-
-/* Header with type and version specific information */
-struct v2_disk_dqinfo {
-       __le32 dqi_bgrace;      /* Time before block soft limit becomes hard limit */
-       __le32 dqi_igrace;      /* Time before inode soft limit becomes hard limit */
-       __le32 dqi_flags;       /* Flags for quotafile (DQF_*) */
-       __le32 dqi_blocks;      /* Number of blocks in file */
-       __le32 dqi_free_blk;    /* Number of first free block in the list */
-       __le32 dqi_free_entry;  /* Number of block with at least one free entry */
-};
-
-#define V2_DQINFOOFF   sizeof(struct v2_disk_dqheader) /* Offset of info header in file */
-#define V2_DQBLKSIZE_BITS 10                           /* Size of leaf block in tree */
-
-#endif /* _LINUX_QUOTAIO_V2_H */
index 5d7c7ec..995ef1d 100644 (file)
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/ramfs.h>
-#include <linux/quotaops.h>
 #include <linux/pagevec.h>
 #include <linux/mman.h>
 
@@ -205,11 +204,6 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
        if (ret)
                return ret;
 
-       /* by providing our own setattr() method, we skip this quotaism */
-       if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
-           (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
-               ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
-
        /* pick out size-changing events */
        if (ia->ia_valid & ATTR_SIZE) {
                loff_t size = i_size_read(inode);
index 4646caa..f32d142 100644 (file)
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -430,7 +430,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
 
        journal_mark_dirty(th, s, sbh);
        if (for_unformatted)
-               DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
+               vfs_dq_free_block_nodirty(inode, 1);
 }
 
 void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1055,7 +1055,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
                               amount_needed, hint->inode->i_uid);
 #endif
                quota_ret =
-                   DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
+                   vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
                if (quota_ret)  /* Quota exceeded? */
                        return QUOTA_EXCEEDED;
                if (hint->preallocate && hint->prealloc_size) {
@@ -1064,8 +1064,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
                                       "reiserquota: allocating (prealloc) %d blocks id=%u",
                                       hint->prealloc_size, hint->inode->i_uid);
 #endif
-                       quota_ret =
-                           DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
+                       quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
                                                         hint->prealloc_size);
                        if (quota_ret)
                                hint->preallocate = hint->prealloc_size = 0;
@@ -1098,7 +1097,10 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
                                               nr_allocated,
                                               hint->inode->i_uid);
 #endif
-                               DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated);      /* Free not allocated blocks */
+                               /* Free not allocated blocks */
+                               vfs_dq_free_block_nodirty(hint->inode,
+                                       amount_needed + hint->prealloc_size -
+                                       nr_allocated);
                        }
                        while (nr_allocated--)
                                reiserfs_free_block(hint->th, hint->inode,
@@ -1129,7 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
                               REISERFS_I(hint->inode)->i_prealloc_count,
                               hint->inode->i_uid);
 #endif
-               DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
+               vfs_dq_free_block_nodirty(hint->inode, amount_needed +
                                         hint->prealloc_size - nr_allocated -
                                         REISERFS_I(hint->inode)->
                                         i_prealloc_count);
index 55fce92..823227a 100644 (file)
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -53,7 +53,7 @@ void reiserfs_delete_inode(struct inode *inode)
                 * after delete_object so that quota updates go into the same transaction as
                 * stat data deletion */
                if (!err) 
-                       DQUOT_FREE_INODE(inode);
+                       vfs_dq_free_inode(inode);
 
                if (journal_end(&th, inode->i_sb, jbegin_count))
                        goto out;
@@ -1763,7 +1763,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 
        BUG_ON(!th->t_trans_id);
 
-       if (DQUOT_ALLOC_INODE(inode)) {
+       if (vfs_dq_alloc_inode(inode)) {
                err = -EDQUOT;
                goto out_end_trans;
        }
@@ -1947,12 +1947,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
        INODE_PKEY(inode)->k_objectid = 0;
 
        /* Quota change must be inside a transaction for journaling */
-       DQUOT_FREE_INODE(inode);
+       vfs_dq_free_inode(inode);
 
       out_end_trans:
        journal_end(th, th->t_super, th->t_blocks_allocated);
        /* Drop can be outside and it needs more credits so it's better to have it outside */
-       DQUOT_DROP(inode);
+       vfs_dq_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        make_bad_inode(inode);
 
@@ -3119,7 +3119,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                                if (error)
                                        goto out;
                                error =
-                                   DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+                                   vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
                                if (error) {
                                        journal_end(&th, inode->i_sb,
                                                    jbegin_count);
index 738967f..639d635 100644 (file)
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -555,7 +555,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
 */
 static int drop_new_inode(struct inode *inode)
 {
-       DQUOT_DROP(inode);
+       vfs_dq_drop(inode);
        make_bad_inode(inode);
        inode->i_flags |= S_NOQUOTA;
        iput(inode);
@@ -563,7 +563,7 @@ static int drop_new_inode(struct inode *inode)
 }
 
 /* utility function that does setup for reiserfs_new_inode.  
-** DQUOT_INIT needs lots of credits so it's better to have it
+** vfs_dq_init needs lots of credits so it's better to have it
 ** outside of a transaction, so we had to pull some bits of
 ** reiserfs_new_inode out into this func.
 */
@@ -586,7 +586,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
        } else {
                inode->i_gid = current_fsgid();
        }
-       DQUOT_INIT(inode);
+       vfs_dq_init(inode);
        return 0;
 }
 
index abbc64d..73aaa33 100644 (file)
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1297,7 +1297,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath
                       "reiserquota delete_item(): freeing %u, id=%u type=%c",
                       quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
 #endif
-       DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+       vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
 
        /* Return deleted body length */
        return n_ret_value;
@@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
                                               quota_cut_bytes, inode->i_uid,
                                               key2type(key));
 #endif
-                               DQUOT_FREE_SPACE_NODIRTY(inode,
+                               vfs_dq_free_space_nodirty(inode,
                                                         quota_cut_bytes);
                        }
                        break;
@@ -1734,7 +1734,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
                       "reiserquota cut_from_item(): freeing %u id=%u type=%c",
                       quota_cut_bytes, p_s_inode->i_uid, '?');
 #endif
-       DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+       vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
        return n_ret_value;
 }
 
@@ -1971,7 +1971,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
                       key2type(&(p_s_key->on_disk_key)));
 #endif
 
-       if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) {
+       if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) {
                pathrelse(p_s_search_path);
                return -EDQUOT;
        }
@@ -2027,7 +2027,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
                       n_pasted_size, inode->i_uid,
                       key2type(&(p_s_key->on_disk_key)));
 #endif
-       DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
+       vfs_dq_free_space_nodirty(inode, n_pasted_size);
        return retval;
 }
 
@@ -2060,7 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
 #endif
                /* We can't dirty inode here. It would be immediately written but
                 * appropriate stat item isn't inserted yet... */
-               if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) {
+               if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
                        pathrelse(p_s_path);
                        return -EDQUOT;
                }
@@ -2112,6 +2112,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
                       quota_bytes, inode->i_uid, head2type(p_s_ih));
 #endif
        if (inode)
-               DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes);
+               vfs_dq_free_space_nodirty(inode, quota_bytes);
        return retval;
 }
index f3c820b..5dbafb7 100644 (file)
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -250,7 +250,7 @@ static int finish_unfinished(struct super_block *s)
                        retval = remove_save_link_only(s, &save_link_key, 0);
                        continue;
                }
-               DQUOT_INIT(inode);
+               vfs_dq_init(inode);
 
                if (truncate && S_ISDIR(inode->i_mode)) {
                        /* We got a truncate request for a dir which is impossible.
@@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = {
 #ifdef CONFIG_QUOTA
 #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
 
-static int reiserfs_dquot_initialize(struct inode *, int);
-static int reiserfs_dquot_drop(struct inode *);
 static int reiserfs_write_dquot(struct dquot *);
 static int reiserfs_acquire_dquot(struct dquot *);
 static int reiserfs_release_dquot(struct dquot *);
@@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int);
 static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
 
 static struct dquot_operations reiserfs_quota_operations = {
-       .initialize = reiserfs_dquot_initialize,
-       .drop = reiserfs_dquot_drop,
+       .initialize = dquot_initialize,
+       .drop = dquot_drop,
        .alloc_space = dquot_alloc_space,
        .alloc_inode = dquot_alloc_inode,
        .free_space = dquot_free_space,
@@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 }
 
 #ifdef CONFIG_QUOTA
-static int reiserfs_dquot_initialize(struct inode *inode, int type)
-{
-       struct reiserfs_transaction_handle th;
-       int ret, err;
-
-       /* We may create quota structure so we need to reserve enough blocks */
-       reiserfs_write_lock(inode->i_sb);
-       ret =
-           journal_begin(&th, inode->i_sb,
-                         2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
-       if (ret)
-               goto out;
-       ret = dquot_initialize(inode, type);
-       err =
-           journal_end(&th, inode->i_sb,
-                       2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
-       if (!ret && err)
-               ret = err;
-      out:
-       reiserfs_write_unlock(inode->i_sb);
-       return ret;
-}
-
-static int reiserfs_dquot_drop(struct inode *inode)
-{
-       struct reiserfs_transaction_handle th;
-       int ret, err;
-
-       /* We may delete quota structure so we need to reserve enough blocks */
-       reiserfs_write_lock(inode->i_sb);
-       ret =
-           journal_begin(&th, inode->i_sb,
-                         2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
-       if (ret) {
-               /*
-                * We call dquot_drop() anyway to at least release references
-                * to quota structures so that umount does not hang.
-                */
-               dquot_drop(inode);
-               goto out;
-       }
-       ret = dquot_drop(inode);
-       err =
-           journal_end(&th, inode->i_sb,
-                       2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
-       if (!ret && err)
-               ret = err;
-      out:
-       reiserfs_write_unlock(inode->i_sb);
-       return ret;
-}
-
 static int reiserfs_write_dquot(struct dquot *dquot)
 {
        struct reiserfs_transaction_handle th;
index dd4acb1..49d0bd3 100644 (file)
--- a/fs/super.c
+++ b/fs/super.c
@@ -197,7 +197,7 @@ void deactivate_super(struct super_block *s)
        if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
                s->s_count -= S_BIAS-1;
                spin_unlock(&sb_lock);
-               DQUOT_OFF(s, 0);
+               vfs_dq_off(s, 0);
                down_write(&s->s_umount);
                fs->kill_sb(s);
                put_filesystem(fs);
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(unlock_super);
 void __fsync_super(struct super_block *sb)
 {
        sync_inodes_sb(sb, 0);
-       DQUOT_SYNC(sb);
+       vfs_dq_sync(sb);
        lock_super(sb);
        if (sb->s_dirt && sb->s_op->write_super)
                sb->s_op->write_super(sb);
@@ -655,7 +655,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
                        mark_files_ro(sb);
                else if (!fs_may_remount_ro(sb))
                        return -EBUSY;
-               retval = DQUOT_OFF(sb, 1);
+               retval = vfs_dq_off(sb, 1);
                if (retval < 0 && retval != -ENOSYS)
                        return -EBUSY;
        }
@@ -670,7 +670,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
        if (remount_rw)
-               DQUOT_ON_REMOUNT(sb);
+               vfs_dq_quota_on_remount(sb);
        return 0;
 }
 
index ec95a69..7abc65f 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -25,7 +25,7 @@ static void do_sync(unsigned long wait)
 {
        wakeup_pdflush(0);
        sync_inodes(0);         /* All mappings, inodes and their blockdevs */
-       DQUOT_SYNC(NULL);
+       vfs_dq_sync(NULL);
        sync_supers();          /* Write the superblocks */
        sync_filesystems(0);    /* Start syncing the filesystems */
        sync_filesystems(wait); /* Waitingly sync the filesystems */
index 1b809bd..2bb788a 100644 (file)
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -206,7 +206,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
                                        ((char *)bh->b_data)[(bit + i) >> 3]);
                        } else {
                                if (inode)
-                                       DQUOT_FREE_BLOCK(inode, 1);
+                                       vfs_dq_free_block(inode, 1);
                                udf_add_free_space(sbi, sbi->s_partition, 1);
                        }
                }
@@ -261,11 +261,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_test_bit(bit, bh->b_data))
                                goto out;
-                       else if (DQUOT_PREALLOC_BLOCK(inode, 1))
+                       else if (vfs_dq_prealloc_block(inode, 1))
                                goto out;
                        else if (!udf_clear_bit(bit, bh->b_data)) {
                                udf_debug("bit already cleared for block %d\n", bit);
-                               DQUOT_FREE_BLOCK(inode, 1);
+                               vfs_dq_free_block(inode, 1);
                                goto out;
                        }
                        block_count--;
@@ -393,7 +393,7 @@ got_block:
        /*
         * Check quota for allocation of this block.
         */
-       if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
+       if (inode && vfs_dq_alloc_block(inode, 1)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                *err = -EDQUOT;
                return 0;
@@ -452,7 +452,7 @@ static void udf_table_free_blocks(struct super_block *sb,
        /* We do this up front - There are some error conditions that
           could occur, but.. oh well */
        if (inode)
-               DQUOT_FREE_BLOCK(inode, count);
+               vfs_dq_free_block(inode, count);
        if (udf_add_free_space(sbi, sbi->s_partition, count))
                mark_buffer_dirty(sbi->s_lvid_bh);
 
@@ -700,7 +700,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
                epos.offset -= adsize;
 
                alloc_count = (elen >> sb->s_blocksize_bits);
-               if (inode && DQUOT_PREALLOC_BLOCK(inode,
+               if (inode && vfs_dq_prealloc_block(inode,
                        alloc_count > block_count ? block_count : alloc_count))
                        alloc_count = 0;
                else if (alloc_count > block_count) {
@@ -806,7 +806,7 @@ static int udf_table_new_block(struct super_block *sb,
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;
 
-       if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
+       if (inode && vfs_dq_alloc_block(inode, 1)) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                *err = -EDQUOT;
index 31fc842..47dbe56 100644 (file)
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode)
         * Note: we must free any quota before locking the superblock,
         * as writing the quota to disk may need the lock as well.
         */
-       DQUOT_FREE_INODE(inode);
-       DQUOT_DROP(inode);
+       vfs_dq_free_inode(inode);
+       vfs_dq_drop(inode);
 
        clear_inode(inode);
 
@@ -154,8 +154,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
        insert_inode_hash(inode);
        mark_inode_dirty(inode);
 
-       if (DQUOT_ALLOC_INODE(inode)) {
-               DQUOT_DROP(inode);
+       if (vfs_dq_alloc_inode(inode)) {
+               vfs_dq_drop(inode);
                inode->i_flags |= S_NOQUOTA;
                inode->i_nlink = 0;
                iput(inode);
index 0d9ada1..54c16ec 100644 (file)
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
                                   "bit already cleared for fragment %u", i);
        }
        
-       DQUOT_FREE_BLOCK (inode, count);
+       vfs_dq_free_block(inode, count);
 
        
        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@@ -195,7 +195,7 @@ do_more:
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
-               DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
+               vfs_dq_free_block(inode, uspi->s_fpb);
 
                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
@@ -556,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
        for (i = oldcount; i < newcount; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
-       if(DQUOT_ALLOC_BLOCK(inode, count)) {
+       if (vfs_dq_alloc_block(inode, count)) {
                *err = -EDQUOT;
                return 0;
        }
@@ -664,7 +664,7 @@ cg_found:
                for (i = count; i < uspi->s_fpb; i++)
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;
-               DQUOT_FREE_BLOCK(inode, i);
+               vfs_dq_free_block(inode, i);
 
                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
@@ -676,7 +676,7 @@ cg_found:
        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
-       if(DQUOT_ALLOC_BLOCK(inode, count)) {
+       if (vfs_dq_alloc_block(inode, count)) {
                *err = -EDQUOT;
                return 0;
        }
@@ -747,7 +747,7 @@ gotit:
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                ufs_clusteracct (sb, ucpi, blkno, -1);
-       if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
+       if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
                *err = -EDQUOT;
                return INVBLOCK;
        }
index 6f5dcf0..3527c00 100644 (file)
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode)
 
        is_directory = S_ISDIR(inode->i_mode);
 
-       DQUOT_FREE_INODE(inode);
-       DQUOT_DROP(inode);
+       vfs_dq_free_inode(inode);
+       vfs_dq_drop(inode);
 
        clear_inode (inode);
 
@@ -355,8 +355,8 @@ cg_found:
 
        unlock_super (sb);
 
-       if (DQUOT_ALLOC_INODE(inode)) {
-               DQUOT_DROP(inode);
+       if (vfs_dq_alloc_inode(inode)) {
+               vfs_dq_drop(inode);
                err = -EDQUOT;
                goto fail_without_unlock;
        }
index d72d5d8..78c4889 100644 (file)
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -198,6 +198,7 @@ struct mem_dqblk {
        qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */
        qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */
        qsize_t dqb_curspace;   /* current used space */
+       qsize_t dqb_rsvspace;   /* current reserved space for delalloc */
        qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */
        qsize_t dqb_isoftlimit; /* preferred inode limit */
        qsize_t dqb_curinodes;  /* current # allocated inodes */
@@ -276,8 +277,6 @@ struct dquot {
        struct mem_dqblk dq_dqb;        /* Diskquota usage */
 };
 
-#define NODQUOT (struct dquot *)NULL
-
 #define QUOTA_OK          0
 #define NO_QUOTA          1
 
@@ -308,6 +307,14 @@ struct dquot_operations {
        int (*release_dquot) (struct dquot *);          /* Quota is going to be deleted from disk */
        int (*mark_dirty) (struct dquot *);             /* Dquot is marked dirty */
        int (*write_info) (struct super_block *, int);  /* Write of quota "superblock" */
+       /* reserve quota for delayed block allocation */
+       int (*reserve_space) (struct inode *, qsize_t, int);
+       /* claim reserved quota for delayed alloc */
+       int (*claim_space) (struct inode *, qsize_t);
+       /* release reserved quota for delayed alloc */
+       void (*release_rsv) (struct inode *, qsize_t);
+       /* get reserved quota for delayed alloc */
+       qsize_t (*get_reserved_space) (struct inode *);
 };
 
 /* Operations handling requests from userspace */
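
[Illustration, not part of the patch: the four callbacks added above give a filesystem
with delayed allocation a two-step quota accounting hook. A filesystem content with the
generic behaviour could, in principle, point them straight at the dquot_* helpers
declared in include/linux/quotaops.h below; the sketch assumes exactly that, and "myfs"
is a made-up name, not a filesystem touched by this series.]

#include <linux/quota.h>
#include <linux/quotaops.h>

/*
 * Only the new reservation hooks are shown; a real filesystem also fills in
 * .initialize, .drop, .alloc_space and the other callbacks, as the
 * reiserfs_quota_operations hunk earlier in this series does.
 */
static struct dquot_operations myfs_quota_operations = {
	.reserve_space		= dquot_reserve_space,
	.claim_space		= dquot_claim_space,
	.release_rsv		= dquot_release_reserved_space,
	.get_reserved_space	= dquot_get_reserved_space,
};
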
index 0b35b3a..36353d9 100644 (file)
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -35,6 +35,11 @@ void dquot_destroy(struct dquot *dquot);
 int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
 int dquot_alloc_inode(const struct inode *inode, qsize_t number);
 
+int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
+int dquot_claim_space(struct inode *inode, qsize_t number);
+void dquot_release_reserved_space(struct inode *inode, qsize_t number);
+qsize_t dquot_get_reserved_space(struct inode *inode);
+
 int dquot_free_space(struct inode *inode, qsize_t number);
 int dquot_free_inode(const struct inode *inode, qsize_t number);
 
@@ -183,6 +188,16 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
        return ret;
 }
 
+static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+{
+       if (sb_any_quota_active(inode->i_sb)) {
+               /* Used space is updated in alloc_space() */
+               if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
+                       return 1;
+       }
+       return 0;
+}
+
 static inline int vfs_dq_alloc_inode(struct inode *inode)
 {
        if (sb_any_quota_active(inode->i_sb)) {
@@ -193,6 +208,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode)
        return 0;
 }
 
+/*
+ * Convert in-memory reserved quotas to real consumed quotas
+ */
+static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+{
+       if (sb_any_quota_active(inode->i_sb)) {
+               if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
+                       return 1;
+       } else
+               inode_add_bytes(inode, nr);
+
+       mark_inode_dirty(inode);
+       return 0;
+}
+
+/*
+ * Release reserved (in-memory) quotas
+ */
+static inline
+void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+{
+       if (sb_any_quota_active(inode->i_sb))
+               inode->i_sb->dq_op->release_rsv(inode, nr);
+}
+
 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
 {
        if (sb_any_quota_active(inode->i_sb))
@@ -339,6 +379,22 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
        return 0;
 }
 
+static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+{
+       return 0;
+}
+
+static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+{
+       return vfs_dq_alloc_space(inode, nr);
+}
+
+static inline
+int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+{
+       return 0;
+}
+
 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
 {
        inode_sub_bytes(inode, nr);
@@ -354,67 +410,48 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
 
 static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
 {
-       return vfs_dq_prealloc_space_nodirty(inode,
-                       nr << inode->i_sb->s_blocksize_bits);
+       return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
 }
 
 static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
 {
-       return vfs_dq_prealloc_space(inode,
-                       nr << inode->i_sb->s_blocksize_bits);
+       return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
 }
 
 static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
 {
-       return vfs_dq_alloc_space_nodirty(inode,
-                       nr << inode->i_sb->s_blocksize_bits);
+       return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
 }
 
 static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
 {
-       return vfs_dq_alloc_space(inode,
-                       nr << inode->i_sb->s_blocksize_bits);
+       return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
+{
+       return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
+{
+       return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
+}
+
+static inline
+void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
+{
+       vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
 }
 
 static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
 {
-       vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits);
+       vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
 }
 
 static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
 {
-       vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits);
+       vfs_dq_free_space(inode, nr << inode->i_blkbits);
 }
 
-/*
- * Define uppercase equivalents for compatibility with old function names
- * Can go away when we think all users have been converted (15/04/2008)
- */
-#define DQUOT_INIT(inode) vfs_dq_init(inode)
-#define DQUOT_DROP(inode) vfs_dq_drop(inode)
-#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \
-                               vfs_dq_prealloc_space_nodirty(inode, nr)
-#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr)
-#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \
-                               vfs_dq_alloc_space_nodirty(inode, nr)
-#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr)
-#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \
-                               vfs_dq_prealloc_block_nodirty(inode, nr)
-#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr)
-#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
-                               vfs_dq_alloc_block_nodirty(inode, nr)
-#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr)
-#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode)
-#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \
-                               vfs_dq_free_space_nodirty(inode, nr)
-#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr)
-#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \
-                               vfs_dq_free_block_nodirty(inode, nr)
-#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr)
-#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode)
-#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr)
-#define DQUOT_SYNC(sb) vfs_dq_sync(sb)
-#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
-#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb)
-
 #endif /* _LINUX_QUOTAOPS_ */
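
[Illustration, not part of the patch: a minimal sketch of how a delayed-allocation write
path might use the block-sized wrappers defined above. The vfs_dq_* helpers and their
return conventions are taken from this header; the myfs_* functions and the surrounding
flow are hypothetical, not code from any filesystem in this series.]

#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>

/* At write time: charge the blocks against the quota as reserved space
 * (dqb_rsvspace) without touching dqb_curspace yet. */
static int myfs_delalloc_reserve(struct inode *inode, qsize_t nr_blocks)
{
	if (vfs_dq_reserve_block(inode, nr_blocks))
		return -EDQUOT;		/* limit would be exceeded */
	return 0;
}

/* At writeback time, once real blocks have been allocated: convert the
 * reservation into consumed space. */
static int myfs_delalloc_claim(struct inode *inode, qsize_t nr_blocks)
{
	if (vfs_dq_claim_block(inode, nr_blocks))
		return -EDQUOT;
	return 0;
}

/* If the dirty data is dropped instead (truncate, write error, ...):
 * give the reservation back. */
static void myfs_delalloc_cancel(struct inode *inode, qsize_t nr_blocks)
{
	vfs_dq_release_reservation_block(inode, nr_blocks);
}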