 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 *
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */
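
/*
 * Illustrative sketch (not part of this file; foo_iget() is a made-up
 * helper): a filesystem hands its inode reference to the dcache with
 *
 *	inode = foo_iget(sb, ino);
 *	d_instantiate(dentry, inode);
 *
 * and the matching iput() is issued from dentry_unlink_inode() when
 * the dentry is torn down, which is what keeps the icache entry alive
 * for as long as the dcache entry exists.
 */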
#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/kasan.h>
/*
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
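
/*
 * A minimal sketch (our own illustration, not a helper defined in this
 * file) of that address-order rule for two dentries with no ancestor
 * relationship:
 */
static inline void lock_two_unrelated_dentries(struct dentry *d1,
					       struct dentry *d2)
{
	/* lower address first, per the ordering rule above */
	if (d2 < d1)
		swap(d1, d2);
	spin_lock(&d1->d_lock);
	spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
}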
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					   unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return dentry_hashtable + hash_32(hash, d_hash_shift);
}

#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
						   unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
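
/*
 * Illustrative only (not a helper in dcache.c): the same (parent, hash)
 * pair indexes both tables, e.g.
 *
 *	struct hlist_bl_head *b  = d_hash(parent, name->hash);
 *	struct hlist_bl_head *il = in_lookup_hash(parent, name->hash);
 *
 * so hashed entries and in-lookup entries for the same directory entry
 * always land in corresponding buckets.
 */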
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
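
/*
 * Example (assuming the usual sysctl wiring is in place): the counters
 * harvested above are the first two fields of /proc/sys/fs/dentry-state,
 * e.g.
 *
 *	$ cat /proc/sys/fs/dentry-state
 *	86329	74056	45	0	0	0
 *
 * i.e. nr_dentry, nr_unused, then age_limit and the unused fields.
 */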
/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
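
/*
 * For reference (assumption: the generic <linux/kernel.h> definition):
 * bytemask_from_count(cnt) is ~(~0ul << (cnt)*8), so with tcount == 2
 * only the low two bytes of (a ^ b) survive the mask on little-endian.
 */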
#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs;
	/*
	 * Be careful about RCU walk racing with rename:
	 * use ACCESS_ONCE to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
}
struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};
static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}

static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}

static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
{
	lockdep_assert_held(&dentry->d_lock);
	/* Go through an invalidation barrier */
	write_seqcount_invalidate(&dentry->d_seq);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	bool hashed = !d_unhashed(dentry);

	if (hashed)
		raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	if (hashed)
		raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
/*
 * dentry_lru_(add|del)_list must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
		dentry_rcuwalk_invalidate(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	__list_del_entry(&dentry->d_child);
	/*
	 * Inform d_walk() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	return dentry; /* try again with same dentry */
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely(dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}
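
/*
 * Caller-side note (sketch): lock_parent() may drop and re-take
 * dentry->d_lock while chasing d_parent, so callers such as
 * d_prune_aliases() below re-check the refcount after it returns:
 *
 *	parent = lock_parent(dentry);
 *	if (likely(!dentry->d_lockref.count))
 *		__dentry_kill(dentry);
 */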
/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return 1;
		}
		return 0;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return 1;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 */
	smp_rmb();
	d_flags = ACCESS_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return 1;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return 1;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return 0;
}
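
/*
 * Note (context): dput() below calls fast_dput() under
 * rcu_read_lock(), which is what keeps the dentry memory valid across
 * the lockless refcount drop even if another CPU kills it concurrently.
 */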
/*
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */
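
/*
 * Shape of that hand-rolled tail recursion (sketch): dentry_kill()
 * hands back the parent that now needs its own reference dropped, and
 * dput() loops instead of recursing:
 *
 *	repeat:
 *		...
 *		dentry = dentry_kill(dentry);
 *		if (dentry)
 *			goto repeat;
 */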
/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}

	/* Slow case: now with the dentry lock held */
	rcu_read_unlock();

	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
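
/*
 * Example use (sketch, roughly what __d_obtain_alias() does further
 * down): try for an existing alias before allocating a disconnected
 * one:
 *
 *	dentry = d_find_alias(inode);
 *	if (!dentry)
 *		dentry = d_obtain_alias(inode);
 *
 * Note that d_obtain_alias() consumes the inode reference while
 * d_find_alias() does not, so a real caller must balance iput().
 */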
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if (dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode;	/* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list.  It is thus, always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
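/* (enumerators as listed in the kerneldoc above) */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};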
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */
static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
{
	int *ret = data;
	if (d_mountpoint(dentry)) {
		*ret = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);
/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;
};
static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && !data->select.found)
		__d_drop(data->select.start);
}
/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * The final d_drop is done as an atomic operation relative to
 * rename_lock ensuring there are no races with d_set_mounted.  This
 * ensures there are no unhashed dentries on the path to a mountpoint.
 */
void d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (data.select.found)
			shrink_dentry_list(&data.select.dispose);

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}

		if (!data.mountpoint && !data.select.found)
			break;

		cond_resched();
	}
}
EXPORT_SYMBOL(d_invalidate);
/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		static const struct qstr anon = QSTR_INIT("/", 1);
		name = &anon;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
		if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
			kasan_unpoison_shadow(dname,
				round_up(name->len + 1, sizeof(unsigned long)));
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}
/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(name);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_SELECT_INODE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_select_inode)
		dentry->d_flags |= DCACHE_OP_SELECT_INODE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer (as set with
 * d_pin_lower()). This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);
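
/*
 * Typical create-path usage (sketch; foo_new_inode() is a placeholder
 * for a filesystem's inode allocator):
 *
 *	inode = foo_new_inode(dir->i_sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	d_instantiate(dentry, inode);
 *
 * The inode reference obtained from the allocator is handed over to
 * the dcache, per the NOTE above.
 */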
/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);
struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		res = __d_alloc(root_inode->i_sb, NULL);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);
static struct dentry *__d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
{
	struct dentry *tmp;
	struct dentry *res;
	unsigned add_flags;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, NULL);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	security_d_instantiate(tmp, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	__d_set_inode_and_type(tmp, inode, add_flags);
	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);

	return tmp;

out_iput:
	iput(inode);
	return res;
}
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
EXPORT_SYMBOL(d_obtain_alias);
/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry. If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is
 * released. A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
EXPORT_SYMBOL(d_obtain_root);
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					 dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);
/*
 * Do the slow-case of the dentry name compare.
 *
 * Unlike the dentry_cmp() function, we need to atomically
 * load the name and length information, so that the
 * filesystem can rely on them, and can use the 'name' and
 * 'len' information without worrying about walking off the
 * end of memory etc.
 *
 * Thus the read_seqcount_retry() and the "duplicate" info
 * in arguments (the low-level filesystem should not look
 * at the dentry inode or name contents directly, since
 * rename can change them while we're in RCU mode).
 */
enum slow_d_compare {
	D_COMP_OK,
	D_COMP_NOMATCH,
	D_COMP_SEQRETRY,
};

static noinline enum slow_d_compare slow_dentry_cmp(
		const struct dentry *parent,
		struct dentry *dentry,
		unsigned int seq,
		const struct qstr *name)
{
	int tlen = dentry->d_name.len;
	const char *tname = dentry->d_name.name;

	if (read_seqcount_retry(&dentry->d_seq, seq)) {
		cpu_relax();
		return D_COMP_SEQRETRY;
	}
	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
		return D_COMP_NOMATCH;
	return D_COMP_OK;
}
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!
 */
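
/*
 * Caller pattern (sketch; this is roughly what the rcu-walk code in
 * fs/namei.c does, with "unlazy" as an assumed label name):
 *
 *	dentry = __d_lookup_rcu(parent, &name, &seq);
 *	...
 *	if (read_seqcount_retry(&dentry->d_seq, seq))
 *		goto unlazy;
 */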
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			*seqp = seq;
			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
			case D_COMP_OK:
				return dentry;
			case D_COMP_NOMATCH:
				continue;
			default:
				goto seqretry;
			}
		}

		if (dentry->d_name.hash_len != hashlen)
			continue;
		*seqp = seq;
		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
			return dentry;
	}
	return NULL;
}
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			int tlen = dentry->d_name.len;
			const char *tname = dentry->d_name.name;
			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
				goto next;
		} else {
			if (dentry->d_name.len != len)
				goto next;
			if (dentry_cmp(dentry, str, len))
				goto next;
		}

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
2280 * When a file is deleted, we have two options:
2281 * - turn this dentry into a negative dentry
2282 * - unhash this dentry and free it.
2284 * Usually, we want to just turn this into
2285 * a negative dentry, but if anybody else is
2286 * currently using the dentry or the inode
2287 * we can't do that and we fall back on removing
2288 * it from the hash queues and waiting for
2289 * it to be deleted later when it has no users
2293 * d_delete - delete a dentry
2294 * @dentry: The dentry to delete
2296 * Turn the dentry into a negative dentry if possible, otherwise
2297 * remove it from the hash queues so it can be deleted later
2300 void d_delete(struct dentry * dentry)
2302 struct inode *inode;
2303 int isdir = 0;
2304 /*
2305 * Are we the only user?
2306 */
2307 again:
2308 spin_lock(&dentry->d_lock);
2309 inode = dentry->d_inode;
2310 isdir = S_ISDIR(inode->i_mode);
2311 if (dentry->d_lockref.count == 1) {
2312 if (!spin_trylock(&inode->i_lock)) {
2313 spin_unlock(&dentry->d_lock);
2314 cpu_relax();
2315 goto again;
2316 }
2317 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2318 dentry_unlink_inode(dentry);
2319 fsnotify_nameremove(dentry, isdir);
2320 return;
2321 }
2323 if (!d_unhashed(dentry))
2324 __d_drop(dentry);
2326 spin_unlock(&dentry->d_lock);
2328 fsnotify_nameremove(dentry, isdir);
2329 }
2330 EXPORT_SYMBOL(d_delete);
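/*
 * Editor's illustration (hypothetical, modelled on typical ->unlink()
 * callers): the backing store is updated first, and only on success is
 * d_delete() invoked, letting it choose between the negative-dentry and
 * unhash options described above.
 */
static int example_finish_unlink(struct dentry *victim, int fs_error)
{
	if (!fs_error)
		d_delete(victim);	/* negative dentry if uncontended */
	return fs_error;
}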
2332 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2334 BUG_ON(!d_unhashed(entry));
2335 hlist_bl_lock(b);
2336 entry->d_flags |= DCACHE_RCUACCESS;
2337 hlist_bl_add_head_rcu(&entry->d_hash, b);
2338 hlist_bl_unlock(b);
2341 static void _d_rehash(struct dentry * entry)
2343 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2347 * d_rehash - add an entry back to the hash
2348 * @entry: dentry to add to the hash
2350 * Adds a dentry to the hash according to its name.
2353 void d_rehash(struct dentry * entry)
2355 spin_lock(&entry->d_lock);
2356 _d_rehash(entry);
2357 spin_unlock(&entry->d_lock);
2359 EXPORT_SYMBOL(d_rehash);
2361 static inline unsigned start_dir_add(struct inode *dir)
2362 {
2364 for (;;) {
2365 unsigned n = dir->i_dir_seq;
2366 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2367 return n;
2368 cpu_relax();
2369 }
2370 }
2372 static inline void end_dir_add(struct inode *dir, unsigned n)
2373 {
2374 smp_store_release(&dir->i_dir_seq, n + 2);
2375 }
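/*
 * Editor's note (inferred from the code above and from d_alloc_parallel()
 * below): ->i_dir_seq behaves like a seqcount. start_dir_add() spins until
 * it can move the counter from even to odd with cmpxchg(), marking a
 * directory modification in progress; end_dir_add() releases it to the
 * next even value. Lookups sample it with smp_load_acquire(), mask off
 * bit 0, and treat any later change as a signal to retry.
 */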
2377 static void d_wait_lookup(struct dentry *dentry)
2379 if (d_in_lookup(dentry)) {
2380 DECLARE_WAITQUEUE(wait, current);
2381 add_wait_queue(dentry->d_wait, &wait);
2382 do {
2383 set_current_state(TASK_UNINTERRUPTIBLE);
2384 spin_unlock(&dentry->d_lock);
2385 schedule();
2386 spin_lock(&dentry->d_lock);
2387 } while (d_in_lookup(dentry));
2391 struct dentry *d_alloc_parallel(struct dentry *parent,
2392 const struct qstr *name,
2393 wait_queue_head_t *wq)
2395 unsigned int len = name->len;
2396 unsigned int hash = name->hash;
2397 const unsigned char *str = name->name;
2398 struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2399 struct hlist_bl_node *node;
2400 struct dentry *new = d_alloc(parent, name);
2401 struct dentry *dentry;
2402 unsigned seq, r_seq, d_seq;
2404 if (unlikely(!new))
2405 return ERR_PTR(-ENOMEM);
2407 retry:
2408 rcu_read_lock();
2409 seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
2410 r_seq = read_seqbegin(&rename_lock);
2411 dentry = __d_lookup_rcu(parent, name, &d_seq);
2412 if (unlikely(dentry)) {
2413 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2414 rcu_read_unlock();
2415 goto retry;
2416 }
2417 if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2418 rcu_read_unlock();
2419 dput(dentry);
2420 goto retry;
2421 }
2422 rcu_read_unlock();
2423 dput(new);
2424 return dentry;
2425 }
2426 if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2427 rcu_read_unlock();
2428 goto retry;
2429 }
2430 hlist_bl_lock(b);
2431 if (unlikely(parent->d_inode->i_dir_seq != seq)) {
2432 hlist_bl_unlock(b);
2433 rcu_read_unlock();
2434 goto retry;
2435 }
2436 rcu_read_unlock();
2438 * No changes for the parent since the beginning of d_lookup().
2439 * Since all removals from the chain happen with hlist_bl_lock(),
2440 * any potential in-lookup matches are going to stay here until
2441 * we unlock the chain. All fields are stable in everything
2442 * we encounter.
2444 hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2445 if (dentry->d_name.hash != hash)
2446 continue;
2447 if (dentry->d_parent != parent)
2448 continue;
2449 if (d_unhashed(dentry))
2450 continue;
2451 if (parent->d_flags & DCACHE_OP_COMPARE) {
2452 int tlen = dentry->d_name.len;
2453 const char *tname = dentry->d_name.name;
2454 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2455 continue;
2456 } else {
2457 if (dentry->d_name.len != len)
2458 continue;
2459 if (dentry_cmp(dentry, str, len))
2460 continue;
2461 }
2462 dget(dentry);
2463 hlist_bl_unlock(b);
2464 /* somebody is doing lookup for it right now; wait for it */
2465 spin_lock(&dentry->d_lock);
2466 d_wait_lookup(dentry);
2468 * it's not in-lookup anymore; in principle we should repeat
2469 * everything from dcache lookup, but it's likely to be what
2470 * d_lookup() would've found anyway. If it is, just return it;
2471 * otherwise we really have to repeat the whole thing.
2473 if (unlikely(dentry->d_name.hash != hash))
2474 goto mismatch;
2475 if (unlikely(dentry->d_parent != parent))
2476 goto mismatch;
2477 if (unlikely(d_unhashed(dentry)))
2478 goto mismatch;
2479 if (parent->d_flags & DCACHE_OP_COMPARE) {
2480 int tlen = dentry->d_name.len;
2481 const char *tname = dentry->d_name.name;
2482 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2483 goto mismatch;
2484 } else {
2485 if (unlikely(dentry->d_name.len != len))
2486 goto mismatch;
2487 if (unlikely(dentry_cmp(dentry, str, len)))
2488 goto mismatch;
2489 }
2490 /* OK, it *is* a hashed match; return it */
2491 spin_unlock(&dentry->d_lock);
2492 dput(new);
2493 return dentry;
2494 }
2495 /* we can't take ->d_lock here; it's OK, though. */
2496 new->d_flags |= DCACHE_PAR_LOOKUP;
2497 new->d_wait = wq;
2498 hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2499 hlist_bl_unlock(b);
2500 return new;
2501 mismatch:
2502 spin_unlock(&dentry->d_lock);
2503 dput(dentry);
2504 goto retry;
2505 }
2506 EXPORT_SYMBOL(d_alloc_parallel);
2508 void __d_lookup_done(struct dentry *dentry)
2510 struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2511 dentry->d_name.hash);
2512 hlist_bl_lock(b);
2513 dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2514 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2515 wake_up_all(dentry->d_wait);
2516 dentry->d_wait = NULL;
2517 hlist_bl_unlock(b);
2518 INIT_HLIST_NODE(&dentry->d_u.d_alias);
2519 INIT_LIST_HEAD(&dentry->d_lru);
2521 EXPORT_SYMBOL(__d_lookup_done);
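/*
 * Editor's illustration (hypothetical helper, in the style of filesystems
 * that prime the dcache from readdir results): d_alloc_parallel() either
 * returns somebody else's dentry (no longer in-lookup) or a new in-lookup
 * dentry that we are responsible for completing, here via d_add(), which
 * ends the in-lookup state through __d_add()/__d_lookup_done(). The inode
 * reference is consumed by d_add().
 */
static void example_prime_dcache(struct dentry *parent, struct qstr *name,
				 struct inode *inode)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct dentry *dentry = d_alloc_parallel(parent, name, &wq);

	if (IS_ERR(dentry))
		return;
	if (d_in_lookup(dentry))
		d_add(dentry, inode);	/* wakes anybody waiting in d_wait_lookup() */
	dput(dentry);
}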
2523 /* inode->i_lock held if inode is non-NULL */
2525 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2527 struct inode *dir = NULL;
2528 unsigned n;
2529 spin_lock(&dentry->d_lock);
2530 if (unlikely(d_in_lookup(dentry))) {
2531 dir = dentry->d_parent->d_inode;
2532 n = start_dir_add(dir);
2533 __d_lookup_done(dentry);
2534 }
2535 if (inode) {
2536 unsigned add_flags = d_flags_for_inode(inode);
2537 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2538 raw_write_seqcount_begin(&dentry->d_seq);
2539 __d_set_inode_and_type(dentry, inode, add_flags);
2540 raw_write_seqcount_end(&dentry->d_seq);
2541 fsnotify_update_flags(dentry);
2542 }
2543 _d_rehash(dentry);
2544 if (dir)
2545 end_dir_add(dir, n);
2546 spin_unlock(&dentry->d_lock);
2547 if (inode)
2548 spin_unlock(&inode->i_lock);
2552 * d_add - add dentry to hash queues
2553 * @entry: dentry to add
2554 * @inode: The inode to attach to this dentry
2556 * This adds the entry to the hash queues and initializes @inode.
2557 * The entry was actually filled in earlier during d_alloc().
2560 void d_add(struct dentry *entry, struct inode *inode)
2562 if (inode) {
2563 security_d_instantiate(entry, inode);
2564 spin_lock(&inode->i_lock);
2565 }
2566 __d_add(entry, inode);
2568 EXPORT_SYMBOL(d_add);
2571 * d_exact_alias - find and hash an exact unhashed alias
2572 * @entry: dentry to add
2573 * @inode: The inode to go with this dentry
2575 * If an unhashed dentry with the same name/parent and desired
2576 * inode already exists, hash and return it. Otherwise, return
2577 * NULL.
2579 * Parent directory should be locked.
2581 struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2583 struct dentry *alias;
2584 int len = entry->d_name.len;
2585 const char *name = entry->d_name.name;
2586 unsigned int hash = entry->d_name.hash;
2588 spin_lock(&inode->i_lock);
2589 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2591 * Don't need alias->d_lock here, because aliases with
2592 * d_parent == entry->d_parent are not subject to name or
2593 * parent changes, because the parent inode i_mutex is held.
2595 if (alias->d_name.hash != hash)
2596 continue;
2597 if (alias->d_parent != entry->d_parent)
2598 continue;
2599 if (alias->d_name.len != len)
2600 continue;
2601 if (dentry_cmp(alias, name, len))
2602 continue;
2603 spin_lock(&alias->d_lock);
2604 if (!d_unhashed(alias)) {
2605 spin_unlock(&alias->d_lock);
2606 alias = NULL;
2607 } else {
2608 __dget_dlock(alias);
2609 _d_rehash(alias);
2610 spin_unlock(&alias->d_lock);
2611 }
2612 spin_unlock(&inode->i_lock);
2613 return alias;
2614 }
2615 spin_unlock(&inode->i_lock);
2616 return NULL;
2617 }
2618 EXPORT_SYMBOL(d_exact_alias);
2621 * dentry_update_name_case - update case insensitive dentry with a new name
2622 * @dentry: dentry to be updated
2625 * Update a case-insensitive dentry with the new case of the name.
2627 * dentry must have been returned by d_lookup with name @name. Old and new
2628 * name lengths must match (i.e. a d_compare that allows mismatched
2629 * name lengths is not supported).
2631 * Parent inode i_mutex must be held over d_lookup and into this call (to
2632 * keep renames and concurrent inserts, and readdir(2) away).
2634 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2636 BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
2637 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2639 spin_lock(&dentry->d_lock);
2640 write_seqcount_begin(&dentry->d_seq);
2641 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2642 write_seqcount_end(&dentry->d_seq);
2643 spin_unlock(&dentry->d_lock);
2645 EXPORT_SYMBOL(dentry_update_name_case);
2647 static void swap_names(struct dentry *dentry, struct dentry *target)
2649 if (unlikely(dname_external(target))) {
2650 if (unlikely(dname_external(dentry))) {
2652 * Both external: swap the pointers
2654 swap(target->d_name.name, dentry->d_name.name);
2657 * dentry:internal, target:external. Steal target's
2658 * storage and make target internal.
2660 memcpy(target->d_iname, dentry->d_name.name,
2661 dentry->d_name.len + 1);
2662 dentry->d_name.name = target->d_name.name;
2663 target->d_name.name = target->d_iname;
2666 if (unlikely(dname_external(dentry))) {
2668 * dentry:external, target:internal. Give dentry's
2669 * storage to target and make dentry internal
2671 memcpy(dentry->d_iname, target->d_name.name,
2672 target->d_name.len + 1);
2673 target->d_name.name = dentry->d_name.name;
2674 dentry->d_name.name = dentry->d_iname;
2677 * Both are internal.
2679 unsigned int i;
2680 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2681 kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
2682 kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
2683 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2684 swap(((long *) &dentry->d_iname)[i],
2685 ((long *) &target->d_iname)[i]);
2689 swap(dentry->d_name.hash_len, target->d_name.hash_len);
2692 static void copy_name(struct dentry *dentry, struct dentry *target)
2694 struct external_name *old_name = NULL;
2695 if (unlikely(dname_external(dentry)))
2696 old_name = external_name(dentry);
2697 if (unlikely(dname_external(target))) {
2698 atomic_inc(&external_name(target)->u.count);
2699 dentry->d_name = target->d_name;
2701 memcpy(dentry->d_iname, target->d_name.name,
2702 target->d_name.len + 1);
2703 dentry->d_name.name = dentry->d_iname;
2704 dentry->d_name.hash_len = target->d_name.hash_len;
2706 if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2707 kfree_rcu(old_name, u.head);
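/*
 * Editor's note: the old external name cannot be freed immediately -
 * lockless readers (see prepend_name() below) may still be walking the
 * string - so the last reference is dropped via kfree_rcu(), deferring
 * the free until after an RCU grace period.
 */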
2710 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2713 * XXXX: do we really need to take target->d_lock?
2715 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2716 spin_lock(&target->d_parent->d_lock);
2717 else {
2718 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2719 spin_lock(&dentry->d_parent->d_lock);
2720 spin_lock_nested(&target->d_parent->d_lock,
2721 DENTRY_D_LOCK_NESTED);
2722 } else {
2723 spin_lock(&target->d_parent->d_lock);
2724 spin_lock_nested(&dentry->d_parent->d_lock,
2725 DENTRY_D_LOCK_NESTED);
2726 }
2727 }
2728 if (target < dentry) {
2729 spin_lock_nested(&target->d_lock, 2);
2730 spin_lock_nested(&dentry->d_lock, 3);
2732 spin_lock_nested(&dentry->d_lock, 2);
2733 spin_lock_nested(&target->d_lock, 3);
2737 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2739 if (target->d_parent != dentry->d_parent)
2740 spin_unlock(&dentry->d_parent->d_lock);
2741 if (target->d_parent != target)
2742 spin_unlock(&target->d_parent->d_lock);
2743 spin_unlock(&target->d_lock);
2744 spin_unlock(&dentry->d_lock);
2748 * When switching names, the actual string doesn't strictly have to
2749 * be preserved in the target - because we're dropping the target
2750 * anyway. As such, we can just do a simple memcpy() to copy over
2751 * the new name before we switch, unless we are going to rehash
2752 * it. Note that if we *do* unhash the target, we are not allowed
2753 * to rehash it without giving it a new name/hash key - whether
2754 * we swap or overwrite the names here, the resulting name won't match
2755 * the reality in the filesystem; it's only there for d_path() purposes.
2756 * Note that all of this is happening under rename_lock, so any hash
2757 * lookup seeing it in the middle of manipulations will be discarded
2758 * anyway. So we do not care what happens to the hash key in that case.
2762 * __d_move - move a dentry
2763 * @dentry: entry to move
2764 * @target: new dentry
2765 * @exchange: exchange the two dentries
2767 * Update the dcache to reflect the move of a file name. Negative
2768 * dcache entries should not be moved in this way. Caller must hold
2769 * rename_lock, the i_mutex of the source and target directories,
2770 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2772 static void __d_move(struct dentry *dentry, struct dentry *target,
2773 bool exchange)
2774 {
2775 struct inode *dir = NULL;
2776 unsigned n;
2777 if (!dentry->d_inode)
2778 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2780 BUG_ON(d_ancestor(dentry, target));
2781 BUG_ON(d_ancestor(target, dentry));
2783 dentry_lock_for_move(dentry, target);
2784 if (unlikely(d_in_lookup(target))) {
2785 dir = target->d_parent->d_inode;
2786 n = start_dir_add(dir);
2787 __d_lookup_done(target);
2790 write_seqcount_begin(&dentry->d_seq);
2791 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2793 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2796 * Move the dentry to the target hash queue. Don't bother checking
2797 * for the same hash queue because of how unlikely it is.
2798 */
2799 __d_drop(dentry);
2800 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2803 * Unhash the target (d_delete() is not usable here). If exchanging
2804 * the two dentries, then rehash onto the other's hash queue.
2805 */
2806 __d_drop(target);
2807 if (exchange) {
2808 __d_rehash(target,
2809 d_hash(dentry->d_parent, dentry->d_name.hash));
2810 }
2812 /* Switch the names.. */
2813 if (exchange)
2814 swap_names(dentry, target);
2815 else
2816 copy_name(dentry, target);
2818 /* ... and switch them in the tree */
2819 if (IS_ROOT(dentry)) {
2820 /* splicing a tree */
2821 dentry->d_parent = target->d_parent;
2822 target->d_parent = target;
2823 list_del_init(&target->d_child);
2824 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2825 } else {
2826 /* swapping two dentries */
2827 swap(dentry->d_parent, target->d_parent);
2828 list_move(&target->d_child, &target->d_parent->d_subdirs);
2829 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2830 if (exchange)
2831 fsnotify_update_flags(target);
2832 fsnotify_update_flags(dentry);
2833 }
2835 write_seqcount_end(&target->d_seq);
2836 write_seqcount_end(&dentry->d_seq);
2838 if (dir)
2839 end_dir_add(dir, n);
2840 dentry_unlock_for_move(dentry, target);
2844 * d_move - move a dentry
2845 * @dentry: entry to move
2846 * @target: new dentry
2848 * Update the dcache to reflect the move of a file name. Negative
2849 * dcache entries should not be moved in this way. See the locking
2850 * requirements for __d_move.
2852 void d_move(struct dentry *dentry, struct dentry *target)
2854 write_seqlock(&rename_lock);
2855 __d_move(dentry, target, false);
2856 write_sequnlock(&rename_lock);
2858 EXPORT_SYMBOL(d_move);
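/*
 * Editor's illustration (hypothetical, modelled on the VFS rename path):
 * lock_rename() provides exactly the locking that __d_move()'s comment
 * demands - both parents' inode locks plus s_vfs_rename_mutex when the
 * parents differ - and returns the "trap" ancestor that callers must
 * check before moving anything. Error paths are omitted.
 */
static void example_move_after_rename(struct dentry *old_dir, struct dentry *new_dir,
				      struct dentry *old, struct dentry *new)
{
	struct dentry *trap = lock_rename(new_dir, old_dir);

	/* ... the filesystem's ->rename() work would happen here ... */
	if (old != trap && new != trap)	/* neither is an ancestor of the other */
		d_move(old, new);	/* takes rename_lock itself */
	unlock_rename(new_dir, old_dir);
}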
2861 * d_exchange - exchange two dentries
2862 * @dentry1: first dentry
2863 * @dentry2: second dentry
2865 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2867 write_seqlock(&rename_lock);
2869 WARN_ON(!dentry1->d_inode);
2870 WARN_ON(!dentry2->d_inode);
2871 WARN_ON(IS_ROOT(dentry1));
2872 WARN_ON(IS_ROOT(dentry2));
2874 __d_move(dentry1, dentry2, true);
2876 write_sequnlock(&rename_lock);
2880 * d_ancestor - search for an ancestor
2881 * @p1: ancestor dentry
2884 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2885 * an ancestor of p2, else NULL.
2887 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2889 struct dentry *p;
2891 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2892 if (p->d_parent == p1)
2893 return p;
2894 }
2895 return NULL;
2896 }
2899 * This helper attempts to cope with remotely renamed directories
2901 * It assumes that the caller is already holding
2902 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2904 * Note: If ever the locking in lock_rename() changes, then please
2905 * remember to update this too...
2907 static int __d_unalias(struct inode *inode,
2908 struct dentry *dentry, struct dentry *alias)
2910 struct mutex *m1 = NULL;
2911 struct rw_semaphore *m2 = NULL;
2912 int ret = -ESTALE;
2914 /* If alias and dentry share a parent, then no extra locks required */
2915 if (alias->d_parent == dentry->d_parent)
2916 goto out_unalias;
2918 /* See lock_rename() */
2919 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2920 goto out_err;
2921 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2922 if (!inode_trylock_shared(alias->d_parent->d_inode))
2923 goto out_err;
2924 m2 = &alias->d_parent->d_inode->i_rwsem;
2925 out_unalias:
2926 __d_move(alias, dentry, false);
2927 ret = 0;
2928 out_err:
2929 if (m2)
2930 up_read(m2);
2931 if (m1)
2932 mutex_unlock(m1);
2933 return ret;
2934 }
2937 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2938 * @inode: the inode which may have a disconnected dentry
2939 * @dentry: a negative dentry which we want to point to the inode.
2941 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2942 * place of the given dentry and return it, else simply d_add the inode
2943 * to the dentry and return NULL.
2945 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2946 * we should error out: directories can't have multiple aliases.
2948 * This is needed in the lookup routine of any filesystem that is exportable
2949 * (via knfsd) so that we can build dcache paths to directories effectively.
2951 * If a dentry was found and moved, then it is returned. Otherwise NULL
2952 * is returned. This matches the expected return value of ->lookup.
2954 * Cluster filesystems may call this function with a negative, hashed dentry.
2955 * In that case, we know that the inode will be a regular file, and also this
2956 * will only occur during atomic_open. So we need to check for the dentry
2957 * being already hashed only in the final case.
2959 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2961 if (IS_ERR(inode))
2962 return ERR_CAST(inode);
2964 BUG_ON(!d_unhashed(dentry));
2969 security_d_instantiate(dentry, inode);
2970 spin_lock(&inode->i_lock);
2971 if (S_ISDIR(inode->i_mode)) {
2972 struct dentry *new = __d_find_any_alias(inode);
2973 if (unlikely(new)) {
2974 /* The reference to new ensures it remains an alias */
2975 spin_unlock(&inode->i_lock);
2976 write_seqlock(&rename_lock);
2977 if (unlikely(d_ancestor(new, dentry))) {
2978 write_sequnlock(&rename_lock);
2979 dput(new);
2980 new = ERR_PTR(-ELOOP);
2981 pr_warn_ratelimited(
2982 "VFS: Lookup of '%s' in %s %s"
2983 " would have caused loop\n",
2984 dentry->d_name.name,
2985 inode->i_sb->s_type->name,
2986 inode->i_sb->s_id);
2987 } else if (!IS_ROOT(new)) {
2988 int err = __d_unalias(inode, dentry, new);
2989 write_sequnlock(&rename_lock);
2990 if (err) {
2991 dput(new);
2992 new = ERR_PTR(err);
2993 }
2994 } else {
2995 __d_move(new, dentry, false);
2996 write_sequnlock(&rename_lock);
2997 }
2998 iput(inode);
2999 return new;
3000 }
3001 }
3003 __d_add(dentry, inode);
3004 return NULL;
3005 }
3006 EXPORT_SYMBOL(d_splice_alias);
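/*
 * Editor's illustration (hypothetical ->lookup(), in the shape the comment
 * above prescribes for exportable filesystems): "example_iget" stands in
 * for the filesystem's own inode lookup and is not a real helper.
 */
/* hypothetical fs-specific inode lookup, for illustration only */
extern struct inode *example_iget(struct inode *dir, const struct qstr *name);

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_iget(dir, &dentry->d_name);

	/* d_splice_alias() copes with NULL and ERR_PTR() inodes itself */
	return d_splice_alias(inode, dentry);
}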
3008 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
3009 {
3010 *buflen -= namelen;
3011 if (*buflen < 0)
3012 return -ENAMETOOLONG;
3013 *buffer -= namelen;
3014 memcpy(*buffer, str, namelen);
3015 return 0;
3016 }
3019 * prepend_name - prepend a pathname in front of current buffer pointer
3020 * @buffer: buffer pointer
3021 * @buflen: allocated length of the buffer
3022 * @name: name string and length qstr structure
3024 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
3025 * make sure that either the old or the new name pointer and length are
3026 * fetched. However, there may be a mismatch between the length and the
3027 * pointer. The length cannot be trusted; we need to copy it byte-by-byte until
3028 * the length is reached or a null byte is found. It also prepends "/" at
3029 * the beginning of the name. The sequence number check at the caller will
3030 * retry it again when a d_move() does happen. So any garbage in the buffer
3031 * due to mismatched pointer and length will be discarded.
3033 * Data dependency barrier is needed to make sure that we see that terminating
3034 * NUL. Alpha strikes again, film at 11...
3036 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
3038 const char *dname = ACCESS_ONCE(name->name);
3039 u32 dlen = ACCESS_ONCE(name->len);
3040 char *p;
3042 smp_read_barrier_depends();
3044 *buflen -= dlen + 1;
3045 if (*buflen < 0)
3046 return -ENAMETOOLONG;
3047 p = *buffer -= dlen + 1;
3048 *p++ = '/';
3049 while (dlen--) {
3050 char c = *dname++;
3051 if (!c)
3052 break;
3053 *p++ = c;
3054 }
3055 return 0;
3056 }
3059 * prepend_path - Prepend path string to a buffer
3060 * @path: the dentry/vfsmount to report
3061 * @root: root vfsmnt/dentry
3062 * @buffer: pointer to the end of the buffer
3063 * @buflen: pointer to buffer length
3065 * The function will first try to write out the pathname without taking any
3066 * lock other than the RCU read lock to make sure that dentries won't go away.
3067 * It only checks the sequence number of the global rename_lock as any change
3068 * in the dentry's d_seq will be preceded by changes in the rename_lock
3069 * sequence number. If the sequence number has been changed, it will restart
3070 * the whole pathname back-tracing again, this time taking the rename_lock.
3071 * In this case, there is no need to take the RCU read lock as the recursive
3072 * parent pointer references will keep the dentry chain alive as long as no
3073 * rename operation is performed.
3075 static int prepend_path(const struct path *path,
3076 const struct path *root,
3077 char **buffer, int *buflen)
3079 struct dentry *dentry;
3080 struct vfsmount *vfsmnt;
3081 struct mount *mnt;
3082 int error = 0;
3083 unsigned seq, m_seq = 0;
3084 char *bptr;
3085 int blen;
3087 rcu_read_lock();
3088 restart_mnt:
3089 read_seqbegin_or_lock(&mount_lock, &m_seq);
3090 seq = 0;
3091 rcu_read_lock();
3092 restart:
3093 bptr = *buffer;
3094 blen = *buflen;
3095 error = 0;
3096 dentry = path->dentry;
3097 vfsmnt = path->mnt;
3098 mnt = real_mount(vfsmnt);
3099 read_seqbegin_or_lock(&rename_lock, &seq);
3100 while (dentry != root->dentry || vfsmnt != root->mnt) {
3101 struct dentry * parent;
3103 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
3104 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
3106 if (dentry != vfsmnt->mnt_root) {
3107 bptr = *buffer;
3108 blen = *buflen;
3109 error = 3;
3110 break;
3111 }
3112 /* Global root? */
3113 if (mnt != parent) {
3114 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
3115 mnt = parent;
3116 vfsmnt = &mnt->mnt;
3117 continue;
3118 }
3119 if (!error)
3120 error = is_mounted(vfsmnt) ? 1 : 2;
3121 break;
3122 }
3123 parent = dentry->d_parent;
3124 prefetch(parent);
3125 error = prepend_name(&bptr, &blen, &dentry->d_name);
3126 if (error)
3127 break;
3129 dentry = parent;
3130 }
3131 if (!(seq & 1))
3132 rcu_read_unlock();
3133 if (need_seqretry(&rename_lock, seq)) {
3134 seq = 1;
3135 goto restart;
3136 }
3137 done_seqretry(&rename_lock, seq);
3139 if (!(m_seq & 1))
3140 rcu_read_unlock();
3141 if (need_seqretry(&mount_lock, m_seq)) {
3142 m_seq = 1;
3143 goto restart_mnt;
3144 }
3145 done_seqretry(&mount_lock, m_seq);
3147 if (error >= 0 && bptr == *buffer) {
3148 if (--blen < 0)
3149 error = -ENAMETOOLONG;
3150 else
3151 *--bptr = '/';
3152 }
3153 *buffer = bptr;
3154 *buflen = blen;
3155 return error;
3156 }
3159 * __d_path - return the path of a dentry
3160 * @path: the dentry/vfsmount to report
3161 * @root: root vfsmnt/dentry
3162 * @buf: buffer to return value in
3163 * @buflen: buffer length
3165 * Convert a dentry into an ASCII path name.
3167 * Returns a pointer into the buffer or an error code if the
3168 * path was too long.
3170 * "buflen" should be positive.
3172 * If the path is not reachable from the supplied root, return %NULL.
3174 char *__d_path(const struct path *path,
3175 const struct path *root,
3176 char *buf, int buflen)
3178 char *res = buf + buflen;
3179 int error;
3181 prepend(&res, &buflen, "\0", 1);
3182 error = prepend_path(path, root, &res, &buflen);
3184 if (error < 0)
3185 return ERR_PTR(error);
3186 if (error > 0)
3187 return NULL;
3188 return res;
3189 }
3191 char *d_absolute_path(const struct path *path,
3192 char *buf, int buflen)
3194 struct path root = {};
3195 char *res = buf + buflen;
3196 int error;
3198 prepend(&res, &buflen, "\0", 1);
3199 error = prepend_path(path, &root, &res, &buflen);
3201 if (error > 1)
3202 error = -EINVAL;
3203 if (error < 0)
3204 return ERR_PTR(error);
3205 return res;
3206 }
3209 * same as __d_path but appends "(deleted)" for unlinked files.
3211 static int path_with_deleted(const struct path *path,
3212 const struct path *root,
3213 char **buf, int *buflen)
3215 prepend(buf, buflen, "\0", 1);
3216 if (d_unlinked(path->dentry)) {
3217 int error = prepend(buf, buflen, " (deleted)", 10);
3218 if (error)
3219 return error;
3220 }
3222 return prepend_path(path, root, buf, buflen);
3225 static int prepend_unreachable(char **buffer, int *buflen)
3227 return prepend(buffer, buflen, "(unreachable)", 13);
3230 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3231 {
3232 unsigned seq;
3234 do {
3235 seq = read_seqcount_begin(&fs->seq);
3236 *root = fs->root;
3237 } while (read_seqcount_retry(&fs->seq, seq));
3238 }
3241 * d_path - return the path of a dentry
3242 * @path: path to report
3243 * @buf: buffer to return value in
3244 * @buflen: buffer length
3246 * Convert a dentry into an ASCII path name. If the entry has been deleted
3247 * the string " (deleted)" is appended. Note that this is ambiguous.
3249 * Returns a pointer into the buffer or an error code if the path was
3250 * too long. Note: Callers should use the returned pointer, not the passed
3251 * in buffer, to use the name! The implementation often starts at an offset
3252 * into the buffer, and may leave 0 bytes at the start.
3254 * "buflen" should be positive.
3256 char *d_path(const struct path *path, char *buf, int buflen)
3258 char *res = buf + buflen;
3259 struct path root;
3260 int error;
3263 * We have various synthetic filesystems that never get mounted. On
3264 * these filesystems dentries are never used for lookup purposes, and
3265 * thus don't need to be hashed. They also don't need a name until a
3266 * user wants to identify the object in /proc/pid/fd/. The little hack
3267 * below allows us to generate a name for these objects on demand:
3269 * Some pseudo inodes are mountable. When they are mounted
3270 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
3271 * and instead have d_path return the mounted path.
3273 if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3274 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3275 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3277 rcu_read_lock();
3278 get_fs_root_rcu(current->fs, &root);
3279 error = path_with_deleted(path, &root, &res, &buflen);
3280 rcu_read_unlock();
3282 if (error < 0)
3283 res = ERR_PTR(error);
3284 return res;
3285 }
3286 EXPORT_SYMBOL(d_path);
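/*
 * Editor's illustration: the calling convention the comment above warns
 * about - the result is a pointer *into* the buffer, not the buffer start.
 * Hypothetical helper, not part of the original source.
 */
static void example_log_path(const struct path *path)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *p;

	if (!buf)
		return;
	p = d_path(path, buf, PAGE_SIZE);
	if (!IS_ERR(p))
		printk(KERN_DEBUG "path: %s\n", p);
	free_page((unsigned long)buf);
}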
3289 * Helper function for dentry_operations.d_dname() members
3291 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3292 const char *fmt, ...)
3293 {
3294 va_list args;
3295 char temp[64];
3296 int sz;
3298 va_start(args, fmt);
3299 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3300 va_end(args);
3302 if (sz > sizeof(temp) || sz > buflen)
3303 return ERR_PTR(-ENAMETOOLONG);
3305 buffer += buflen - sz;
3306 return memcpy(buffer, temp, sz);
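/*
 * Editor's illustration (modelled on pipefs-style d_dname users): a
 * dentry_operations.d_dname member built on dynamic_dname(). The
 * "example:[ino]" format is made up for this sketch.
 */
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
			     dentry->d_inode->i_ino);
}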
3309 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3311 char *end = buffer + buflen;
3312 /* these dentries are never renamed, so d_lock is not needed */
3313 if (prepend(&end, &buflen, " (deleted)", 11) ||
3314 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3315 prepend(&end, &buflen, "/", 1))
3316 end = ERR_PTR(-ENAMETOOLONG);
3317 return end;
3318 }
3319 EXPORT_SYMBOL(simple_dname);
3322 * Write full pathname from the root of the filesystem into the buffer.
3324 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3326 struct dentry *dentry;
3327 char *end, *retval;
3328 int len, seq = 0;
3329 int error = 0;
3331 if (buflen < 2)
3332 goto Elong;
3334 rcu_read_lock();
3335 restart:
3336 dentry = d;
3337 end = buf + buflen;
3338 len = buflen;
3339 prepend(&end, &len, "\0", 1);
3340 /* Get '/' right */
3341 retval = end-1;
3342 *retval = '/';
3343 read_seqbegin_or_lock(&rename_lock, &seq);
3344 while (!IS_ROOT(dentry)) {
3345 struct dentry *parent = dentry->d_parent;
3347 prefetch(parent);
3348 error = prepend_name(&end, &len, &dentry->d_name);
3349 if (error)
3350 break;
3352 retval = end;
3353 dentry = parent;
3354 }
3355 if (!(seq & 1))
3356 rcu_read_unlock();
3357 if (need_seqretry(&rename_lock, seq)) {
3358 seq = 1;
3359 goto restart;
3360 }
3361 done_seqretry(&rename_lock, seq);
3362 if (error)
3363 goto Elong;
3364 return retval;
3365 Elong:
3366 return ERR_PTR(-ENAMETOOLONG);
3367 }
3369 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3371 return __dentry_path(dentry, buf, buflen);
3373 EXPORT_SYMBOL(dentry_path_raw);
3375 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3377 char *p = NULL;
3378 char *retval;
3380 if (d_unlinked(dentry)) {
3381 p = buf + buflen;
3382 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3383 goto Elong;
3384 buflen++;
3385 }
3386 retval = __dentry_path(dentry, buf, buflen);
3387 if (!IS_ERR(retval) && p)
3388 *p = '/'; /* restore '/' overridden with '\0' */
3389 return retval;
3390 Elong:
3391 return ERR_PTR(-ENAMETOOLONG);
3392 }
3394 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3395 struct path *pwd)
3396 {
3397 unsigned seq;
3399 do {
3400 seq = read_seqcount_begin(&fs->seq);
3401 *root = fs->root;
3402 *pwd = fs->pwd;
3403 } while (read_seqcount_retry(&fs->seq, seq));
3404 }
3407 * NOTE! The user-level library version returns a
3408 * character pointer. The kernel system call just
3409 * returns the length of the buffer filled (which
3410 * includes the ending '\0' character), or a negative
3411 * error value. So libc would do something like
3413 * char *getcwd(char * buf, size_t size)
3414 * {
3415 * int retval;
3416 *
3417 * retval = sys_getcwd(buf, size);
3418 * if (retval >= 0)
3419 * return buf;
3420 * errno = -retval;
3421 * return NULL;
3422 * }
3424 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3427 struct path pwd, root;
3428 char *page = __getname();
3434 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3437 if (!d_unlinked(pwd.dentry)) {
3439 char *cwd = page + PATH_MAX;
3440 int buflen = PATH_MAX;
3442 prepend(&cwd, &buflen, "\0", 1);
3443 error = prepend_path(&pwd, &root, &cwd, &buflen);
3449 /* Unreachable from current root */
3451 error = prepend_unreachable(&cwd, &buflen);
3457 len = PATH_MAX + page - cwd;
3460 if (copy_to_user(buf, cwd, len))
3473 * Test whether new_dentry is a subdirectory of old_dentry.
3475 * Trivially implemented using the dcache structure
3479 * is_subdir - is new dentry a subdirectory of old_dentry
3480 * @new_dentry: new dentry
3481 * @old_dentry: old dentry
3483 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3484 * Returns false otherwise.
3485 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3488 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3489 {
3490 bool result;
3491 unsigned seq;
3493 if (new_dentry == old_dentry)
3494 return true;
3496 do {
3497 /* for restarting inner loop in case of seq retry */
3498 seq = read_seqbegin(&rename_lock);
3500 * Need rcu_read_lock() to protect against the d_parent trashing
3501 * due to d_move().
3502 */
3503 rcu_read_lock();
3504 if (d_ancestor(old_dentry, new_dentry))
3505 result = true;
3506 else
3507 result = false;
3508 rcu_read_unlock();
3509 } while (read_seqretry(&rename_lock, seq));
3511 return result;
3512 }
3514 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3516 struct dentry *root = data;
3517 if (dentry != root) {
3518 if (d_unhashed(dentry) || !dentry->d_inode)
3519 return D_WALK_SKIP;
3521 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3522 dentry->d_flags |= DCACHE_GENOCIDE;
3523 dentry->d_lockref.count--;
3524 }
3525 }
3526 return D_WALK_CONTINUE;
3527 }
3529 void d_genocide(struct dentry *parent)
3531 d_walk(parent, parent, d_genocide_kill, NULL);
3534 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3536 inode_dec_link_count(inode);
3537 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3538 !hlist_unhashed(&dentry->d_u.d_alias) ||
3539 !d_unlinked(dentry));
3540 spin_lock(&dentry->d_parent->d_lock);
3541 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3542 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3543 (unsigned long long)inode->i_ino);
3544 spin_unlock(&dentry->d_lock);
3545 spin_unlock(&dentry->d_parent->d_lock);
3546 d_instantiate(dentry, inode);
3548 EXPORT_SYMBOL(d_tmpfile);
3550 static __initdata unsigned long dhash_entries;
3551 static int __init set_dhash_entries(char *str)
3552 {
3553 if (!str)
3554 return 0;
3555 dhash_entries = simple_strtoul(str, &str, 0);
3556 return 1;
3557 }
3558 __setup("dhash_entries=", set_dhash_entries);
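/*
 * Editor's note (usage): the hash table size can be forced at boot, e.g.
 * with "dhash_entries=65536" on the kernel command line; otherwise
 * alloc_large_system_hash() below sizes the table from available memory.
 */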
3560 static void __init dcache_init_early(void)
3562 unsigned int loop;
3564 /* If hashes are distributed across NUMA nodes, defer
3565 * hash allocation until vmalloc space is available.
3566 */
3567 if (hashdist)
3568 return;
3570 dentry_hashtable =
3571 alloc_large_system_hash("Dentry cache",
3572 sizeof(struct hlist_bl_head),
3573 dhash_entries,
3574 13,
3575 HASH_EARLY,
3576 &d_hash_shift,
3577 &d_hash_mask,
3578 0,
3579 0);
3581 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3582 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3585 static void __init dcache_init(void)
3586 {
3587 unsigned int loop;
3589 /*
3590 * A constructor could be added for stable state like the lists,
3591 * but it is probably not worth it because of the cache nature
3592 * of the objects.
3593 */
3594 dentry_cache = KMEM_CACHE(dentry,
3595 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
3597 /* Hash may have been set up in dcache_init_early */
3598 if (!hashdist)
3599 return;
3601 dentry_hashtable =
3602 alloc_large_system_hash("Dentry cache",
3603 sizeof(struct hlist_bl_head),
3604 dhash_entries,
3605 13,
3606 0,
3607 &d_hash_shift,
3608 &d_hash_mask,
3609 0,
3610 0);
3612 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3613 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3616 /* SLAB cache for __getname() consumers */
3617 struct kmem_cache *names_cachep __read_mostly;
3618 EXPORT_SYMBOL(names_cachep);
3620 EXPORT_SYMBOL(d_genocide);
3622 void __init vfs_caches_init_early(void)
3624 dcache_init_early();
3625 inode_init_early();
3628 void __init vfs_caches_init(void)
3630 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3631 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3633 dcache_init();
3634 inode_init();
3635 files_init();
3636 files_maxfiles_init();