/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Page.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include <linux/list.h>

#include "../include/cl_object.h"
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);

# define PASSERT(env, page, expr)                                      \
        do {                                                           \
                if (unlikely(!(expr))) {                               \
                        CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
                        LBUG();                                        \
                }                                                      \
        } while (0)

# define PINVRNT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))

/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain initial reference to previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by keeping a lock on a VM page,
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(atomic_read(&page->cp_ref) > 0);
        atomic_inc(&page->cp_ref);
}

/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;

        list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        return slice;
        }
        return NULL;
}

static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj = page->cp_obj;

        PASSERT(env, page, list_empty(&page->cp_batch));
        PASSERT(env, page, !page->cp_owner);
        PASSERT(env, page, !page->cp_req);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        while (!list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = list_entry(page->cp_layers.next,
                                   struct cl_page_slice, cpl_linkage);
                list_del_init(page->cp_layers.next);
                if (unlikely(slice->cpl_ops->cpo_fini))
                        slice->cpl_ops->cpo_fini(env, slice);
        }
        lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        kfree(page);
}

/**
 * Helper function updating page state. This is the only place in the code
 * where cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        *(enum cl_page_state *)&page->cp_state = state;
}

struct cl_page *cl_page_alloc(const struct lu_env *env,
                              struct cl_object *o, pgoff_t ind,
                              struct page *vmpage,
                              enum cl_page_type type)
{
        struct cl_page *page;
        struct lu_object_header *head;

        page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS);
        if (page) {
                int result = 0;

                atomic_set(&page->cp_ref, 1);
                page->cp_obj = o;
                cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
                                     page);
                page->cp_vmpage = vmpage;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                INIT_LIST_HEAD(&page->cp_layers);
                INIT_LIST_HEAD(&page->cp_batch);
                INIT_LIST_HEAD(&page->cp_flight);
                mutex_init(&page->cp_mutex);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init) {
                                result = o->co_ops->coo_page_init(env, o, page,
                                                                  ind);
                                if (result != 0) {
                                        cl_page_delete0(env, page);
                                        cl_page_free(env, page);
                                        page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
        } else {
                page = ERR_PTR(-ENOMEM);
        }
        return page;
}
EXPORT_SYMBOL(cl_page_alloc);

/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If page is
 * found there, it is returned immediately. Otherwise new page is allocated
 * and returned. In any case, additional reference to page is acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
                             struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        struct cl_page *page = NULL;
        struct cl_object_header *hdr;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);

        hdr = cl_object_header(o);
        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        if (type == CPT_CACHEABLE) {
                /*
                 * vmpage lock is used to protect the child/parent
                 * relationship.
                 */
                KLASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 * - "vmpage" is locked (which prevents ->private from
                 *   concurrent updates), and
                 *
                 * - "o" cannot be destroyed while current thread holds a
                 *   reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                if (page)
                        return page;
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        return page;
}
EXPORT_SYMBOL(cl_page_find);
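
/*
 * Example (illustrative sketch, not part of the original code): a typical
 * caller already holds the lock on "vmpage" and looks up or creates the
 * corresponding cl_page; "env", "obj" and "index" are assumed to come from
 * the caller's context.
 *
 *	struct cl_page *page;
 *
 *	page = cl_page_find(env, obj, index, vmpage, CPT_CACHEABLE);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page while the VM page stays locked ...
 *	cl_page_put(env, page);
 */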

static inline int cl_page_invariant(const struct cl_page *pg)
{
        /*
         * Page invariant is protected by a VM lock.
         */
        LINVRNT(cl_page_is_vmlocked(NULL, pg));

        return cl_page_in_use_noref(pg);
}

static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking.
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction on the memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                /* no transitions out of CPS_FREEING */
        };

        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        PASSERT(env, page, page->cp_state == old);
        PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
        cl_page_state_set_trust(page, state);
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}

/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by caller already possessing a reference to \a
 * page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        cl_page_get_trust(page);
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page.
 *
 * When last reference is released, page is returned to the cache, unless it
 * is in cl_page_state::CPS_FREEING state, in which case it is immediately
 * destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       atomic_read(&page->cp_ref));

        if (atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, !page->cp_owner);
                PASSERT(env, page, list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page);
        }
}
EXPORT_SYMBOL(cl_page_put);

/**
 * Returns a cl_page associated with a VM page, and given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
        struct cl_page *page;

        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". That works because object destruction has
         *       bottom-to-top pass.
         */
        page = (struct cl_page *)vmpage->private;
        if (page) {
                cl_page_get_trust(page);
                LASSERT(page->cp_type == CPT_CACHEABLE);
        }
        return page;
}
EXPORT_SYMBOL(cl_vmpage_page);
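
/*
 * Example (sketch): mapping a locked VM page back to its cl_page.  The
 * reference returned for a non-NULL result must be dropped with
 * cl_page_put(); "env" and "obj" are assumed to be available in the caller.
 *
 *	struct cl_page *clp;
 *
 *	lock_page(vmpage);
 *	clp = cl_vmpage_page(vmpage, obj);
 *	if (clp) {
 *		... inspect clp ...
 *		cl_page_put(env, clp);
 *	}
 *	unlock_page(vmpage);
 */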

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
({                                                                      \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        int                         __result;                           \
        ptrdiff_t                   __op   = (_op);                     \
        int                       (*__method)_proto;                    \
                                                                        \
        __result = 0;                                                   \
        list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method) {                                         \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
                        if (__result != 0)                              \
                                break;                                  \
                }                                                       \
        }                                                               \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)           \
({                                                                      \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        int                         __result;                           \
        ptrdiff_t                   __op   = (_op);                     \
        int                       (*__method)_proto;                    \
                                                                        \
        __result = 0;                                                   \
        list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
                                    cpl_linkage) {                      \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method) {                                         \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
                        if (__result != 0)                              \
                                break;                                  \
                }                                                       \
        }                                                               \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method)                                           \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
        }                                                               \
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method)                                           \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
        }                                                               \
} while (0)

static int cl_page_invoke(const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        return CL_PAGE_INVOKE(env, page, op,
                              (const struct lu_env *,
                               const struct cl_page_slice *, struct cl_io *),
                              io);
}

static void cl_page_invoid(const struct lu_env *env,
                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        CL_PAGE_INVOID(env, page, op,
                       (const struct lu_env *,
                        const struct cl_page_slice *, struct cl_io *), io);
}

static void cl_page_owner_clear(struct cl_page *page)
{
        if (page->cp_owner) {
                LASSERT(page->cp_owner->ci_owned_nr > 0);
                page->cp_owner->ci_owned_nr--;
                page->cp_owner = NULL;
                page->cp_task = NULL;
        }
}

static void cl_page_owner_set(struct cl_page *page)
{
        page->cp_owner->ci_owned_nr++;
}

void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        enum cl_page_state state;

        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * uppermost layer (llite), responsible for VFS/VM interaction runs
         * last and can release locks safely.
         */
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
}

/**
 * Returns true, iff page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        struct cl_io *top = cl_io_top((struct cl_io *)io);

        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
}
EXPORT_SYMBOL(cl_page_is_owned);

/**
 * Try to own a page by IO.
 *
 * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
 * into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or page was owned by another thread, or in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
        } else {
                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                        (const struct lu_env *,
                                         const struct cl_page_slice *,
                                         struct cl_io *, int),
                                        io, nonblock);
                if (result == 0) {
                        PASSERT(env, pg, !pg->cp_owner);
                        PASSERT(env, pg, !pg->cp_req);
                        pg->cp_owner = cl_io_top(io);
                        pg->cp_task = current;
                        cl_page_owner_set(pg);
                        if (pg->cp_state != CPS_FREEING) {
                                cl_page_state_set(env, pg, CPS_OWNED);
                        } else {
                                cl_page_disown0(env, io, pg);
                                result = -ENOENT;
                        }
                }
        }
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        return result;
}

/**
 * Own a page, might be blocked.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblock version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
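
/*
 * Example (sketch): taking and releasing page ownership around a private
 * operation; "env", "io" and "pg" are assumed to come from the caller.
 *
 *	if (cl_page_own(env, io, pg) == 0) {
 *		... page is in CPS_OWNED, safe to operate on ...
 *		cl_page_disown(env, io, pg);
 *	}
 *
 * cl_page_own_try() follows the same pattern, but fails instead of waiting
 * when the page cannot be owned immediately.
 */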

/**
 * Assume page ownership.
 *
 * Called when page is already locked by the hosting VM.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        PASSERT(env, pg, !pg->cp_owner);
        pg->cp_owner = cl_io_top(io);
        pg->cp_task = current;
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
}
EXPORT_SYMBOL(cl_page_assume);

/**
 * Releases page ownership without unlocking the page.
 *
 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
 * underlying VM page (as VM is supposed to do this itself).
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
}
EXPORT_SYMBOL(cl_page_unassume);
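
/*
 * Example (sketch): in a path where the VM already holds the page lock,
 * ownership is taken with cl_page_assume() and returned with
 * cl_page_unassume() rather than with cl_page_own()/cl_page_disown():
 *
 *	cl_page_assume(env, io, pg);
 *	... operate on the owned page ...
 *	cl_page_unassume(env, io, pg);
 */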

/**
 * Releases page ownership.
 *
 * Moves page into cl_page_state::CPS_CACHED.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
                pg->cp_state == CPS_FREEING);

        cl_page_disown0(env, io, pg);
}
EXPORT_SYMBOL(cl_page_disown);

/**
 * Called when page is to be removed from the object, e.g., as a result of
 * truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);

/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);

        cl_page_state_set0(env, pg, CPS_FREEING);

        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
                               (const struct lu_env *,
                                const struct cl_page_slice *));
}

/**
 * Called when a decision is made to throw page out of memory.
 *
 * Notifies all layers about page destruction by calling
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves page into cl_page_state::CPS_FREEING state (this is the only place
 * where transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes page from the radix trees,
 *
 *     - breaks linkage from VM page to cl_page.
 *
 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
 * drain after some time, at which point page will be recycled.
 *
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);

/**
 * Marks page up-to-date.
 *
 * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
 * layer responsible for VM interaction has to mark/clear page as up-to-date
 * by the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);

/**
 * Returns true, iff \a pg is VM locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        int result;
        const struct cl_page_slice *slice;

        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
        return result == -EBUSY;
}
EXPORT_SYMBOL(cl_page_is_vmlocked);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN;
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
}

/**
 * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). Layer
 * handling interactions with the VM also has to inform VM that page is under
 * transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_prep);
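
/*
 * Example (sketch): preparing an owned page for an immediate write.  A
 * return value of -EALREADY means some layer asked for the page to be
 * omitted from the transfer, which the caller normally treats as "skip"
 * rather than as an error.
 *
 *	rc = cl_page_prep(env, io, pg, CRT_WRITE);
 *	if (rc == 0)
 *		... page is now in CPS_PAGEOUT, queue it for transfer ...
 *	else if (rc == -EALREADY)
 *		... leave the page alone ...
 */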

/**
 * Notify layers about transfer completion.
 *
 * Invoked by transfer sub-system (which is a part of osc) to notify layers
 * that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that
 * uppermost layer (llite), responsible for the VFS/VM interaction runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        /* cl_page::cp_req already cleared by the caller (osc_completion()) */
        PASSERT(env, pg, !pg->cp_req);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        if (crt == CRT_READ && ioret == 0) {
                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
                pg->cp_flags |= CPF_READ_COMPLETED;
        }

        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor) {
                LASSERT(cl_page_is_vmlocked(env, pg));
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
        }
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, pg);

        if (anchor)
                cl_sync_io_note(env, anchor, ioret);
}
EXPORT_SYMBOL(cl_page_completion);

/**
 * Notify layers that transfer formation engine decided to yank this page from
 * the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);

        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        if (result == 0) {
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_make_ready);

/**
 * Called when a page is being written back at the kernel's request.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *pg)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
        return result;
}
EXPORT_SYMBOL(cl_page_flush);

/**
 * Checks whether the page is protected by an extent lock of at least the
 * required mode.
 *
 * \return the same as in cl_page_operations::cpo_is_under_lock() method.
 * \see cl_page_operations::cpo_is_under_lock()
 */
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page, pgoff_t *max_index)
{
        int rc;

        PINVRNT(env, page, cl_page_invariant(page));

        rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
                                    (const struct lu_env *,
                                     const struct cl_page_slice *,
                                     struct cl_io *, pgoff_t *),
                                    io, max_index);
        return rc;
}
EXPORT_SYMBOL(cl_page_is_under_lock);

/**
 * Tells transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int, int),
                       from, to);
}
EXPORT_SYMBOL(cl_page_clip);
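
/*
 * Example (sketch): for a partial-page write of "count" bytes starting at
 * byte "from" within the page, the transfer is clipped before the page is
 * queued:
 *
 *	cl_page_clip(env, pg, from, from + count);
 */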

/**
 * Prints human readable representation of \a pg to the \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p %d %d %d %p %p %#x]\n",
                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_error, pg->cp_type,
                   pg->cp_owner, pg->cp_req, pg->cp_flags);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints human readable representation of \a pg to the \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        cl_page_header_print(env, cookie, printer, pg);
        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
                       (const struct lu_env *env,
                        const struct cl_page_slice *slice,
                        void *cookie, lu_printer_t p), cookie, printer);
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);

/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
                              (const struct lu_env *,
                               const struct cl_page_slice *));
}
EXPORT_SYMBOL(cl_page_cancel);

/**
 * Converts a page index into a byte offset within object \a obj.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

int cl_page_size(const struct cl_object *obj)
{
        return 1 << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
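
/*
 * Example: with the PAGE_SHIFT-based implementation above, the two helpers
 * are inverses of each other; for a 4KB page size:
 *
 *	cl_offset(obj, 3)     == 12288
 *	cl_index(obj, 12288)  == 3
 *	cl_index(obj, 12300)  == 3	(offsets are rounded down to a page)
 */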

/**
 * Adds page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. New state is added at the end of
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj, pgoff_t index,
                       const struct cl_page_operations *ops)
{
        list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj = obj;
        slice->cpl_index = index;
        slice->cpl_ops = ops;
        slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
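
/*
 * Example (hypothetical sketch): a layer's cl_object_operations::coo_page_init
 * method registers its slice with cl_page_slice_add().  The "foo_page",
 * "foo_page_of" and "foo_page_ops" names below are illustrative only and do
 * not exist in the tree; fp points at the layer-private part of the page
 * buffer, located in a layer-specific way.
 *
 *	static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
 *				 struct cl_page *page, pgoff_t index)
 *	{
 *		struct foo_page *fp = foo_page_of(page);
 *
 *		cl_page_slice_add(page, &fp->fp_cl, obj, index, &foo_page_ops);
 *		return 0;
 *	}
 */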