/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_device and cl_device_type implementation for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

/*
 * The vvp_ prefix stands for "Vfs Vm Posix". It corresponds to the historical
 * "llite_" (var. "ll_") prefix.
 */
static struct kmem_cache *ll_thread_kmem;
struct kmem_cache *vvp_lock_kmem;
struct kmem_cache *vvp_object_kmem;
struct kmem_cache *vvp_req_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct kmem_cache *vvp_thread_kmem;
static struct lu_kmem_descr vvp_caches[] = {
	{
		.ckd_cache = &ll_thread_kmem,
		.ckd_name  = "ll_thread_kmem",
		.ckd_size  = sizeof(struct ll_thread_info),
	},
	{
		.ckd_cache = &vvp_lock_kmem,
		.ckd_name  = "vvp_lock_kmem",
		.ckd_size  = sizeof(struct vvp_lock),
	},
	{
		.ckd_cache = &vvp_object_kmem,
		.ckd_name  = "vvp_object_kmem",
		.ckd_size  = sizeof(struct vvp_object),
	},
	{
		.ckd_cache = &vvp_req_kmem,
		.ckd_name  = "vvp_req_kmem",
		.ckd_size  = sizeof(struct vvp_req),
	},
	{
		.ckd_cache = &vvp_session_kmem,
		.ckd_name  = "vvp_session_kmem",
		.ckd_size  = sizeof(struct vvp_session)
	},
	{
		.ckd_cache = &vvp_thread_kmem,
		.ckd_name  = "vvp_thread_kmem",
		.ckd_size  = sizeof(struct vvp_thread_info),
	},
	{
		.ckd_cache = NULL
	}
};
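/*
 * vvp_caches is a NULL-terminated descriptor table: lu_kmem_init() and
 * lu_kmem_fini(), called from vvp_global_init() and vvp_global_fini() below,
 * walk it to create and destroy all of the slab caches declared above in
 * one pass.
 */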
static void *ll_thread_key_init(const struct lu_context *ctx,
				struct lu_context_key *key)
{
	struct vvp_thread_info *info;

	info = kmem_cache_zalloc(ll_thread_kmem, GFP_NOFS);
	if (!info)
		info = ERR_PTR(-ENOMEM);
	return info;
}
static void ll_thread_key_fini(const struct lu_context *ctx,
			       struct lu_context_key *key, void *data)
{
	struct vvp_thread_info *info = data;

	kmem_cache_free(ll_thread_kmem, info);
}
struct lu_context_key ll_thread_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = ll_thread_key_init,
	.lct_fini = ll_thread_key_fini
};
static void *vvp_session_key_init(const struct lu_context *ctx,
				  struct lu_context_key *key)
{
	struct vvp_session *session;

	session = kmem_cache_zalloc(vvp_session_kmem, GFP_NOFS);
	if (!session)
		session = ERR_PTR(-ENOMEM);
	return session;
}
static void vvp_session_key_fini(const struct lu_context *ctx,
				 struct lu_context_key *key, void *data)
{
	struct vvp_session *session = data;

	kmem_cache_free(vvp_session_kmem, session);
}
struct lu_context_key vvp_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = vvp_session_key_init,
	.lct_fini = vvp_session_key_fini
};
void *vvp_thread_key_init(const struct lu_context *ctx,
			  struct lu_context_key *key)
{
	struct vvp_thread_info *vti;

	vti = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
	if (!vti)
		vti = ERR_PTR(-ENOMEM);
	return vti;
}
void vvp_thread_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct vvp_thread_info *vti = data;

	kmem_cache_free(vvp_thread_kmem, vti);
}
struct lu_context_key vvp_thread_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = vvp_thread_key_init,
	.lct_fini = vvp_thread_key_fini
};
/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(vvp, &vvp_thread_key, &ll_thread_key, &vvp_session_key);
static const struct lu_device_operations vvp_lu_ops = {
	.ldo_object_alloc = vvp_object_alloc
};
static const struct cl_device_operations vvp_cl_ops = {
	.cdo_req_init = vvp_req_init
};
static struct lu_device *vvp_device_free(const struct lu_env *env,
					 struct lu_device *d)
{
	struct vvp_device *vdv = lu2vvp_dev(d);
	struct cl_site *site = lu2cl_site(d->ld_site);
	struct lu_device *next = cl2lu_dev(vdv->vdv_next);

	if (d->ld_site) {
		cl_site_fini(site);
		kfree(site);
	}
	cl_device_fini(lu2cl_dev(d));
	kfree(vdv);
	return next;
}
static struct lu_device *vvp_device_alloc(const struct lu_env *env,
					  struct lu_device_type *t,
					  struct lustre_cfg *cfg)
{
	struct vvp_device *vdv;
	struct lu_device *lud;
	struct cl_site *site;
	int rc;

	vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
	if (!vdv)
		return ERR_PTR(-ENOMEM);

	lud = &vdv->vdv_cl.cd_lu_dev;
	cl_device_init(&vdv->vdv_cl, t);
	vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
	vdv->vdv_cl.cd_ops = &vvp_cl_ops;

	site = kzalloc(sizeof(*site), GFP_NOFS);
	if (site) {
		rc = cl_site_init(site, &vdv->vdv_cl);
		if (rc == 0) {
			rc = lu_site_init_finish(&site->cs_lu);
		} else {
			LASSERT(!lud->ld_site);
			CERROR("Cannot init lu_site, rc %d.\n", rc);
			kfree(site);
		}
	} else {
		rc = -ENOMEM;
	}
	if (rc != 0) {
		vvp_device_free(env, lud);
		lud = ERR_PTR(rc);
	}
	return lud;
}
static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
			   const char *name, struct lu_device *next)
{
	struct vvp_device *vdv;
	int rc;

	vdv = lu2vvp_dev(d);
	vdv->vdv_next = lu2cl_dev(next);

	LASSERT(d->ld_site && next->ld_type);
	next->ld_site = d->ld_site;
	rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
						      next->ld_type->ldt_name,
						      NULL);
	if (rc == 0) {
		lu_device_get(next);
		lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
	}
	return rc;
}
static struct lu_device *vvp_device_fini(const struct lu_env *env,
					 struct lu_device *d)
{
	return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
}
static const struct lu_device_type_operations vvp_device_type_ops = {
	.ldto_init = vvp_type_init,
	.ldto_fini = vvp_type_fini,

	.ldto_start = vvp_type_start,
	.ldto_stop = vvp_type_stop,

	.ldto_device_alloc = vvp_device_alloc,
	.ldto_device_free = vvp_device_free,
	.ldto_device_init = vvp_device_init,
	.ldto_device_fini = vvp_device_fini,
};
struct lu_device_type vvp_device_type = {
	.ldt_tags = LU_DEVICE_CL,
	.ldt_name = LUSTRE_VVP_NAME,
	.ldt_ops = &vvp_device_type_ops,
	.ldt_ctx_tags = LCT_CL_THREAD
};
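/*
 * vvp_device_type is the handle by which the VVP layer is plugged into the
 * client stack: vvp_global_init() registers it via lu_device_type_init(),
 * and cl_sb_init() below passes it to cl_type_setup() at mount time.
 */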
/**
 * A mutex serializing calls to vvp_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
int vvp_global_init(void)
{
	int rc;

	rc = lu_kmem_init(vvp_caches);
	if (rc != 0)
		return rc;

	rc = lu_device_type_init(&vvp_device_type);
	if (rc != 0)
		goto out_kmem;

	return 0;

out_kmem:
	lu_kmem_fini(vvp_caches);
	return rc;
}
void vvp_global_fini(void)
{
	lu_device_type_fini(&vvp_device_type);
	lu_kmem_fini(vvp_caches);
}
/*****************************************************************************
 *
 * mirror obd-devices into cl devices.
 *
 */
int cl_sb_init(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct cl_device *cl;
	struct lu_env *env;
	int rc = 0;
	int refcheck;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cl = cl_type_setup(env, NULL, &vvp_device_type,
				   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
		if (!IS_ERR(cl)) {
			cl2vvp_dev(cl)->vdv_sb = sb;
			sbi->ll_cl = cl;
			sbi->ll_site = cl2lu_dev(cl)->ld_site;
		}
		cl_env_put(env, &refcheck);
	} else {
		rc = PTR_ERR(env);
	}
	return rc;
}
int cl_sb_fini(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct lu_env *env;
	struct cl_device *cld;
	int result;
	int refcheck;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cld = sbi->ll_cl;

		if (cld) {
			cl_stack_fini(env, cld);
			sbi->ll_cl = NULL;
			sbi->ll_site = NULL;
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
		result = PTR_ERR(env);
	}
	/*
	 * If the mount failed (sbi->ll_cl == NULL) and there are no other
	 * mounts, stop device types manually (this usually happens
	 * automatically when the last device is destroyed).
	 */
	return result;
}
/****************************************************************************
 *
 * debugfs/lustre/llite/$MNT/dump_page_cache
 *
 ****************************************************************************/
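/*
 * Usage sketch (assuming debugfs is mounted at the conventional
 * /sys/kernel/debug): reading the file dumps one line per cached page,
 * formatted by vvp_pgcache_show() below, e.g.
 *
 *	cat /sys/kernel/debug/lustre/llite/$MNT/dump_page_cache
 *
 * The seq_file position is the packed vvp_pgcache_id described next.
 */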
/*
 * To represent the contents of a page cache as a byte stream, the following
 * information is encoded in the 64-bit offset:
 *
 *	- file hash bucket in lu_site::ls_hash[]	28 bits
 *
 *	- how far the file is from the bucket head	 4 bits
 *
 *	- page index					32 bits
 *
 * The first two items identify a file in the cache uniquely.
 */

#define PGC_OBJ_SHIFT	(32 + 4)
#define PGC_DEPTH_SHIFT	(32)
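/*
 * Worked example (hypothetical values): bucket 0x5, depth 0x3 and page
 * index 0x10 pack into
 *
 *	pos = (0x5ULL << PGC_OBJ_SHIFT) | (0x3ULL << PGC_DEPTH_SHIFT) | 0x10
 *	    = 0x5300000010
 *
 * and vvp_pgcache_id_unpack() recovers the three fields by shifting and
 * masking. A 32-bit page index with 4 KiB pages also explains the 16 TiB
 * per-file limit noted in vvp_pgcache_find() (2^32 pages * 4 KiB = 16 TiB).
 */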
struct vvp_pgcache_id {
	unsigned int		 vpi_bucket;
	unsigned int		 vpi_depth;
	u32			 vpi_index;

	unsigned int		 vpi_curdep;
	struct lu_object_header *vpi_obj;
};
static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
{
	CLASSERT(sizeof(pos) == sizeof(__u64));

	id->vpi_index = pos & 0xffffffff;
	id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
	id->vpi_bucket = (unsigned long long)pos >> PGC_OBJ_SHIFT;
}
static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
{
	return
		((__u64)id->vpi_index) |
		((__u64)id->vpi_depth << PGC_DEPTH_SHIFT) |
		((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
}
static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			       struct hlist_node *hnode, void *data)
{
	struct vvp_pgcache_id *id = data;
	struct lu_object_header *hdr = cfs_hash_object(hs, hnode);

	if (id->vpi_curdep-- > 0)
		return 0; /* continue */

	if (lu_object_is_dying(hdr))
		return 1;

	cfs_hash_get(hs, hnode);
	id->vpi_obj = hdr;
	return 1;
}
static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
					 struct lu_device *dev,
					 struct vvp_pgcache_id *id)
{
	LASSERT(lu_device_is_cl(dev));

	id->vpi_depth &= 0xf;
	id->vpi_obj = NULL;
	id->vpi_curdep = id->vpi_depth;

	cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
				vvp_pgcache_obj_get, id);
	if (id->vpi_obj) {
		struct lu_object *lu_obj;

		lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
		if (lu_obj) {
			lu_object_ref_add(lu_obj, "dump", current);
			return lu2cl(lu_obj);
		}
		lu_object_put(env, lu_object_top(id->vpi_obj));
	} else if (id->vpi_curdep > 0) {
		id->vpi_depth = 0xf;
	}
	return NULL;
}
static loff_t vvp_pgcache_find(const struct lu_env *env,
			       struct lu_device *dev, loff_t pos)
{
	struct cl_object *clob;
	struct lu_site *site;
	struct vvp_pgcache_id id;

	site = dev->ld_site;
	vvp_pgcache_id_unpack(pos, &id);

	while (1) {
		if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
			return ~0ULL;
		clob = vvp_pgcache_obj(env, dev, &id);
		if (clob) {
			struct inode *inode = vvp_object_inode(clob);
			struct page *vmpage;
			int nr;

			nr = find_get_pages_contig(inode->i_mapping,
						   id.vpi_index, 1, &vmpage);
			if (nr > 0) {
				id.vpi_index = vmpage->index;
				/* Can't support over 16T file */
				nr = !(vmpage->index > 0xffffffff);
				put_page(vmpage);
			}

			lu_object_ref_del(&clob->co_lu, "dump", current);
			cl_object_put(env, clob);
			if (nr > 0)
				return vvp_pgcache_id_pack(&id);
		}
		/* to the next object. */
		++id.vpi_depth;
		id.vpi_depth &= 0xf;
		if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
			return ~0ULL;
		id.vpi_index = 0;
	}
}
#define seq_page_flag(seq, page, flag, has_flags) do {			\
	if (test_bit(PG_##flag, &(page)->flags)) {			\
		seq_printf(seq, "%s"#flag, has_flags ? "|" : "");	\
		has_flags = 1;						\
	}								\
} while (0)
static void vvp_pgcache_page_show(const struct lu_env *env,
				  struct seq_file *seq, struct cl_page *page)
{
	struct vvp_page *vpg;
	struct page *vmpage;
	int has_flags;

	vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
	vmpage = vpg->vpg_page;
	seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
		   0 /* gen */,
		   vpg, page,
		   "none",
		   vpg->vpg_write_queued ? "wq" : "- ",
		   vpg->vpg_defer_uptodate ? "du" : "- ",
		   PageWriteback(vmpage) ? "wb" : "-",
		   vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
		   vmpage->mapping->host, vmpage->index,
		   page_count(vmpage));
	has_flags = 0;
	seq_page_flag(seq, vmpage, locked, has_flags);
	seq_page_flag(seq, vmpage, error, has_flags);
	seq_page_flag(seq, vmpage, referenced, has_flags);
	seq_page_flag(seq, vmpage, uptodate, has_flags);
	seq_page_flag(seq, vmpage, dirty, has_flags);
	seq_page_flag(seq, vmpage, writeback, has_flags);
	seq_printf(seq, "%s]\n", has_flags ? "" : "-");
}
static int vvp_pgcache_show(struct seq_file *f, void *v)
{
	loff_t pos;
	struct ll_sb_info *sbi;
	struct cl_object *clob;
	struct lu_env *env;
	struct vvp_pgcache_id id;
	int refcheck;
	int result;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		pos = *(loff_t *)v;
		vvp_pgcache_id_unpack(pos, &id);
		sbi = f->private;
		clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
		if (clob) {
			struct inode *inode = vvp_object_inode(clob);
			struct cl_page *page = NULL;
			struct page *vmpage;

			result = find_get_pages_contig(inode->i_mapping,
						       id.vpi_index, 1,
						       &vmpage);
			if (result > 0) {
				lock_page(vmpage);
				page = cl_vmpage_page(vmpage, clob);
				unlock_page(vmpage);
				put_page(vmpage);
			}

			seq_printf(f, "%8x@" DFID ": ", id.vpi_index,
				   PFID(lu_object_fid(&clob->co_lu)));
			if (page) {
				vvp_pgcache_page_show(env, f, page);
				cl_page_put(env, page);
			} else {
				seq_puts(f, "missing\n");
			}
			lu_object_ref_del(&clob->co_lu, "dump", current);
			cl_object_put(env, clob);
		} else {
			seq_printf(f, "%llx missing\n", pos);
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		result = PTR_ERR(env);
	}
	return result;
}
static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
{
	struct ll_sb_info *sbi;
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		sbi = f->private;
		if (sbi->ll_site->ls_obj_hash->hs_cur_bits >
		    64 - PGC_OBJ_SHIFT) {
			pos = ERR_PTR(-EFBIG);
		} else {
			*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
						*pos);
			if (*pos == ~0ULL)
				pos = NULL;
		}
		cl_env_put(env, &refcheck);
	}
	return pos;
}
static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct ll_sb_info *sbi;
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		sbi = f->private;
		*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
		if (*pos == ~0ULL)
			pos = NULL;
		cl_env_put(env, &refcheck);
	}
	return pos;
}
static void vvp_pgcache_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}
static const struct seq_operations vvp_pgcache_ops = {
	.start = vvp_pgcache_start,
	.next = vvp_pgcache_next,
	.stop = vvp_pgcache_stop,
	.show = vvp_pgcache_show
};
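/*
 * seq_file drives the iteration: ->start() maps the current file offset to
 * the first packed vvp_pgcache_id at or after it, ->show() prints that page,
 * ->next() advances to the following id, and ->stop() ends the pass. A NULL
 * return from start/next (when vvp_pgcache_find() reports ~0ULL) terminates
 * the dump.
 */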
static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
{
	struct seq_file *seq;
	int rc;

	rc = seq_open(filp, &vvp_pgcache_ops);
	if (rc)
		return rc;

	seq = filp->private_data;
	seq->private = inode->i_private;

	return 0;
}
const struct file_operations vvp_dump_pgcache_file_ops = {
	.owner = THIS_MODULE,
	.open = vvp_dump_pgcache_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
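/*
 * Minimal registration sketch, not taken from this file: llite wires
 * vvp_dump_pgcache_file_ops up through its own debugfs helpers, but with
 * plain debugfs the equivalent would be
 *
 *	debugfs_create_file("dump_page_cache", 0444, llite_root, sbi,
 *			    &vvp_dump_pgcache_file_ops);
 *
 * where "llite_root" and the sbi private pointer are placeholders; the data
 * argument lands in inode->i_private, which vvp_dump_pgcache_seq_open()
 * copies into seq->private for the show/start/next callbacks above.
 */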