/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client IO.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
#include <linux/sched.h>
#include "../include/cl_object.h"
#include "cl_internal.h"

/*****************************************************************************
 *
 * cl_io interface.
 *
 */

#define cl_io_for_each(slice, io) \
	list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
	list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)

static inline int cl_io_type_is_valid(enum cl_io_type type)
{
	return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
	return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}

/**
 * Returns true iff there is an IO ongoing in the given environment.
 */
int cl_io_is_going(const struct lu_env *env)
{
	return cl_env_info(env)->clt_current_io != NULL;
}
EXPORT_SYMBOL(cl_io_is_going);

/**
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
 */
static int cl_io_invariant(const struct cl_io *io)
{
	struct cl_io *up;

	up = io->ci_parent;
	return
		/*
		 * io can own pages only when it is ongoing. Sub-io might
		 * still be in CIS_LOCKED state when top-io is in
		 * CIS_IO_GOING.
		 */
		ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
		     (io->ci_state == CIS_LOCKED && up));
}

/**
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
 */
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
{
	struct cl_io_slice *slice;
	struct cl_thread_info *info;

	LINVRNT(cl_io_type_is_valid(io->ci_type));
	LINVRNT(cl_io_invariant(io));

	while (!list_empty(&io->ci_layers)) {
		slice = container_of(io->ci_layers.prev, struct cl_io_slice,
				     cis_linkage);
		list_del_init(&slice->cis_linkage);
		if (slice->cis_iop->op[io->ci_type].cio_fini)
			slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
		/*
		 * Invalidate slice to catch use after free. This assumes that
		 * slices are allocated within session and can be touched
		 * after ->cio_fini() returns.
		 */
		slice->cis_io = NULL;
	}
	io->ci_state = CIS_FINI;
	info = cl_env_info(env);
	if (info->clt_current_io == io)
		info->clt_current_io = NULL;

	/* sanity check for layout change */
	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		break;
	case CIT_FAULT:
		break;
	case CIT_FSYNC:
		LASSERT(!io->ci_need_restart);
		break;
	case CIT_SETATTR:
	case CIT_MISC:
		/* Check ignore layout change conf */
		LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
			     !io->ci_need_restart));
		break;
	default:
		LBUG();
	}
}
EXPORT_SYMBOL(cl_io_fini);

static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
		       enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_object *scan;
	int result;

	LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
	LINVRNT(cl_io_type_is_valid(iot));
	LINVRNT(cl_io_invariant(io));

	io->ci_type = iot;
	INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
	INIT_LIST_HEAD(&io->ci_lockset.cls_done);
	INIT_LIST_HEAD(&io->ci_layers);

	result = 0;
	cl_object_for_each(scan, obj) {
		if (scan->co_ops->coo_io_init) {
			result = scan->co_ops->coo_io_init(env, scan, io);
			if (result != 0)
				break;
		}
	}
	if (result == 0)
		io->ci_state = CIS_INIT;
	return result;
}

/**
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * \pre obj != cl_object_top(obj)
 */
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
		   enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_thread_info *info = cl_env_info(env);

	LASSERT(obj != cl_object_top(obj));
	if (!info->clt_current_io)
		info->clt_current_io = io;
	return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);

/**
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 *
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
 */
int cl_io_init(const struct lu_env *env, struct cl_io *io,
	       enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_thread_info *info = cl_env_info(env);

	LASSERT(obj == cl_object_top(obj));
	LASSERT(!info->clt_current_io);

	info->clt_current_io = io;
	return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);

/**
 * Initialize read or write io.
 *
 * \pre iot == CIT_READ || iot == CIT_WRITE
 */
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
		  enum cl_io_type iot, loff_t pos, size_t count)
{
	LINVRNT(iot == CIT_READ || iot == CIT_WRITE);

	LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
			 "io range: %u [%llu, %llu) %u %u\n",
			 iot, (__u64)pos, (__u64)pos + count,
			 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
	io->u.ci_rw.crw_pos = pos;
	io->u.ci_rw.crw_count = count;
	return cl_io_init(env, io, iot, io->ci_obj);
}
EXPORT_SYMBOL(cl_io_rw_init);

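/*
 * Illustrative sketch (assumption, not taken from this file): a write path
 * would typically pick the top object and byte range before entering the io
 * loop; "env", "io", "obj", "pos" and "count" are assumed to come from the
 * caller.
 *
 *	io->ci_obj = cl_object_top(obj);
 *	io->u.ci_wr.wr_append = 0;
 *	result = cl_io_rw_init(env, io, CIT_WRITE, pos, count);
 */
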
static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
			      const struct cl_lock_descr *d1)
{
	return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
			  lu_object_fid(&d1->cld_obj->co_lu));
}

/**
 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
 */
static void cl_io_locks_sort(struct cl_io *io)
{
	int done = 0;

	/* hidden treasure: bubble sort for now. */
	do {
		struct cl_io_lock_link *curr;
		struct cl_io_lock_link *prev;
		struct cl_io_lock_link *temp;

		done = 1;
		prev = NULL;

		list_for_each_entry_safe(curr, temp,
					 &io->ci_lockset.cls_todo,
					 cill_linkage) {
			if (prev) {
				switch (cl_lock_descr_sort(&prev->cill_descr,
							   &curr->cill_descr)) {
				case 0:
					/*
					 * IMPOSSIBLE: Identical locks are
					 *             already removed at
					 *             this point.
					 */
				default:
					LBUG();
				case +1:
					list_move_tail(&curr->cill_linkage,
						       &prev->cill_linkage);
					done = 0;
					continue; /* don't change prev: it's
						   * still "previous"
						   */
				case -1: /* already in order */
					break;
				}
			}
			prev = curr;
		}
	} while (!done);
}

static void cl_lock_descr_merge(struct cl_lock_descr *d0,
				const struct cl_lock_descr *d1)
{
	d0->cld_start = min(d0->cld_start, d1->cld_start);
	d0->cld_end = max(d0->cld_end, d1->cld_end);

	if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
		d0->cld_mode = CLM_WRITE;

	if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
		d0->cld_mode = CLM_GROUP;
}

static int cl_lockset_merge(const struct cl_lockset *set,
			    const struct cl_lock_descr *need)
{
	struct cl_io_lock_link *scan;

	list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
		if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
			continue;

		/* Merge locks for the same object because the ldlm lock
		 * server may expand the lock extent; otherwise there is a
		 * deadlock case if two conflicting locks are queued for the
		 * same object and the lock server expands one lock to overlap
		 * the other. The side effect is that it can generate a
		 * multi-stripe lock that may cause a cascading problem.
		 */
		cl_lock_descr_merge(&scan->cill_descr, need);
		CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
		       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
		       scan->cill_descr.cld_end);
		return 1;
	}
	return 0;
}

static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
			   struct cl_lockset *set)
{
	struct cl_io_lock_link *link;
	struct cl_io_lock_link *temp;
	int result;

	result = 0;
	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
		result = cl_lock_request(env, io, &link->cill_lock);
		if (result < 0)
			break;

		list_move(&link->cill_linkage, &set->cls_done);
	}
	return result;
}

/**
 * Takes locks necessary for the current iteration of io.
 *
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration. Then sort locks (to avoid dead-locks),
 * and acquire them.
 */
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_IT_STARTED);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->op[io->ci_type].cio_lock)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
		if (result != 0)
			break;
	}
	if (result == 0) {
		cl_io_locks_sort(io);
		result = cl_lockset_lock(env, io, &io->ci_lockset);
	}
	if (result != 0)
		cl_io_unlock(env, io);
	else
		io->ci_state = CIS_LOCKED;
	return result;
}
EXPORT_SYMBOL(cl_io_lock);

/**
 * Release locks taken by io.
 */
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
{
	struct cl_lockset *set;
	struct cl_io_lock_link *link;
	struct cl_io_lock_link *temp;
	const struct cl_io_slice *scan;

	LASSERT(cl_io_is_loopable(io));
	LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
	LINVRNT(cl_io_invariant(io));

	set = &io->ci_lockset;

	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
		list_del_init(&link->cill_linkage);
		if (link->cill_fini)
			link->cill_fini(env, link);
	}

	list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
		list_del_init(&link->cill_linkage);
		cl_lock_release(env, &link->cill_lock);
		if (link->cill_fini)
			link->cill_fini(env, link);
	}

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_unlock)
			scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
	}
	io->ci_state = CIS_UNLOCKED;
	LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
}
EXPORT_SYMBOL(cl_io_unlock);

/**
 * Prepares next iteration of io.
 *
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
 */
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
	LINVRNT(cl_io_invariant(io));

	result = 0;
	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->op[io->ci_type].cio_iter_init)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
								      scan);
		if (result != 0)
			break;
	}
	if (result == 0)
		io->ci_state = CIS_IT_STARTED;
	return result;
}
EXPORT_SYMBOL(cl_io_iter_init);

/**
 * Finalizes io iteration.
 *
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
 */
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_UNLOCKED);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_iter_fini)
			scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
	}
	io->ci_state = CIS_IT_ENDED;
}
EXPORT_SYMBOL(cl_io_iter_fini);

/**
 * Records that read or write io progressed \a nob bytes forward.
 */
static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io,
			     size_t nob)
{
	const struct cl_io_slice *scan;

	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
		nob == 0);
	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(cl_io_invariant(io));

	io->u.ci_rw.crw_pos += nob;
	io->u.ci_rw.crw_count -= nob;

	/* layers have to be notified. */
	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_advance)
			scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
								   nob);
	}
}

/**
 * Adds a lock to a lockset.
 */
int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
		   struct cl_io_lock_link *link)
{
	int result;

	if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) {
		result = 1;
	} else {
		list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
		result = 0;
	}
	return result;
}
EXPORT_SYMBOL(cl_io_lock_add);

static void cl_free_io_lock_link(const struct lu_env *env,
				 struct cl_io_lock_link *link)
{
	kfree(link);
}

/**
 * Allocates new lock link, and uses it to add a lock to a lockset.
 */
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
			 struct cl_lock_descr *descr)
{
	struct cl_io_lock_link *link;
	int result;

	link = kzalloc(sizeof(*link), GFP_NOFS);
	if (link) {
		link->cill_descr = *descr;
		link->cill_fini = cl_free_io_lock_link;
		result = cl_io_lock_add(env, io, link);
		if (result) /* lock match */
			link->cill_fini(env, link);
	} else {
		result = -ENOMEM;
	}
	return result;
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);

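/*
 * Illustrative sketch (assumption, not taken from this file): a layer's
 * ->cio_lock() method would typically fill a cl_lock_descr and queue it with
 * cl_io_lock_alloc_add(); "start" and "end" are assumed to be page indices
 * supplied by the caller.
 *
 *	struct cl_lock_descr descr;
 *
 *	memset(&descr, 0, sizeof(descr));
 *	descr.cld_obj   = io->ci_obj;
 *	descr.cld_mode  = CLM_READ;
 *	descr.cld_start = start;
 *	descr.cld_end   = end;
 *	result = cl_io_lock_alloc_add(env, io, &descr);
 */
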
/**
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
 */
int cl_io_start(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));

	io->ci_state = CIS_IO_GOING;
	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->op[io->ci_type].cio_start)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
		if (result != 0)
			break;
	}
	if (result >= 0)
		result = 0;
	return result;
}
EXPORT_SYMBOL(cl_io_start);

/**
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
 */
void cl_io_end(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_IO_GOING);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_end)
			scan->cis_iop->op[io->ci_type].cio_end(env, scan);
		/* TODO: error handling. */
	}
	io->ci_state = CIS_IO_FINISHED;
}
EXPORT_SYMBOL(cl_io_end);

static const struct cl_page_slice *
cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
{
	const struct cl_page_slice *slice;

	slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
	return slice;
}

/**
 * Called by read io, when page has to be read from the server.
 *
 * \see cl_io_operations::cio_read_page()
 */
int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
		    struct cl_page *page)
{
	const struct cl_io_slice *scan;
	struct cl_2queue *queue;
	int result = 0;

	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
	LINVRNT(cl_page_is_owned(page, io));
	LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));

	queue = &io->ci_queue;

	cl_2queue_init(queue);
	/*
	 * ->cio_read_page() methods called in the loop below are supposed to
	 * never block waiting for network (the only subtle point is the
	 * creation of new pages for read-ahead that might result in cache
	 * shrinking, but currently only clean pages are shrunk and this
	 * requires no network io).
	 *
	 * Should this ever start blocking, a retry loop would be needed for
	 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
	 */
	cl_io_for_each(scan, io) {
		if (scan->cis_iop->cio_read_page) {
			const struct cl_page_slice *slice;

			slice = cl_io_slice_page(scan, page);
			result = scan->cis_iop->cio_read_page(env, scan, slice);
			if (result != 0)
				break;
		}
	}
	if (result == 0 && queue->c2_qin.pl_nr > 0)
		result = cl_io_submit_rw(env, io, CRT_READ, queue);
	/*
	 * Unlock unsent pages in case of error.
	 */
	cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);
	return result;
}
EXPORT_SYMBOL(cl_io_read_page);

/**
 * Commit a list of contiguous pages into writeback cache.
 *
 * \returns 0 if all pages committed, or errcode if error occurred.
 * \see cl_io_operations::cio_commit_async()
 */
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
		       struct cl_page_list *queue, int from, int to,
		       cl_commit_cbt cb)
{
	const struct cl_io_slice *scan;
	int result = 0;

	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->cio_commit_async)
			continue;
		result = scan->cis_iop->cio_commit_async(env, scan, queue,
							 from, to, cb);
		if (result != 0)
			break;
	}
	return result;
}
EXPORT_SYMBOL(cl_io_commit_async);

/**
 * Submits a list of pages for immediate io.
 *
 * On return, submitted pages have been moved to queue->c2_qout, while
 * queue->c2_qin contains both the pages that did not need to be submitted
 * and the pages that failed to be submitted.
 *
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
 */
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
		    enum cl_req_type crt, struct cl_2queue *queue)
{
	const struct cl_io_slice *scan;
	int result = 0;

	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->cio_submit)
			continue;
		result = scan->cis_iop->cio_submit(env, scan, crt, queue);
		if (result != 0)
			break;
	}
	/*
	 * If ->cio_submit() failed, no pages were sent.
	 */
	LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
	return result;
}
EXPORT_SYMBOL(cl_io_submit_rw);

static void cl_page_list_assume(const struct lu_env *env,
				struct cl_io *io, struct cl_page_list *plist);

/**
 * Submit a sync_io and wait for the IO to finish, or until an error happens.
 * If \a timeout is zero, wait for the IO unconditionally.
 */
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
		      enum cl_req_type iot, struct cl_2queue *queue,
		      long timeout)
{
	struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
	struct cl_page *pg;
	int rc;

	cl_page_list_for_each(pg, &queue->c2_qin) {
		LASSERT(!pg->cp_sync_io);
		pg->cp_sync_io = anchor;
	}

	cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
	rc = cl_io_submit_rw(env, io, iot, queue);
	if (rc == 0) {
		/*
		 * If some pages weren't sent for any reason (e.g.,
		 * read found up-to-date pages in the cache, or write found
		 * clean pages), count them as completed to avoid infinite
		 * wait.
		 */
		cl_page_list_for_each(pg, &queue->c2_qin) {
			pg->cp_sync_io = NULL;
			cl_sync_io_note(env, anchor, 1);
		}

		/* wait for the IO to be finished. */
		rc = cl_sync_io_wait(env, anchor, timeout);
		cl_page_list_assume(env, io, &queue->c2_qout);
	} else {
		LASSERT(list_empty(&queue->c2_qout.pl_pages));
		cl_page_list_for_each(pg, &queue->c2_qin)
			pg->cp_sync_io = NULL;
	}
	return rc;
}
EXPORT_SYMBOL(cl_io_submit_sync);

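/*
 * Illustrative sketch (assumption, not taken from this file): synchronous
 * read of a single already-owned page through the 2-queue helpers defined
 * later in this file; "env", "io" and "page" are assumed to come from the
 * caller.
 *
 *	struct cl_2queue *queue = &io->ci_queue;
 *
 *	cl_2queue_init_page(queue, page);
 *	rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
 *	cl_2queue_disown(env, io, queue);
 *	cl_2queue_fini(env, queue);
 */
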
/**
 * Pumps io through iterations calling cl_io_iter_init(), cl_io_lock(),
 * cl_io_start(), cl_io_end(), cl_io_unlock() and cl_io_iter_fini()
 * repeatedly until there is no more io to do.
 */
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
{
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));

	do {
		size_t nob;

		io->ci_continue = 0;
		result = cl_io_iter_init(env, io);
		if (result == 0) {
			nob = io->ci_nob;
			result = cl_io_lock(env, io);
			if (result == 0) {
				/*
				 * Notify layers that locks have been taken,
				 * and do actual i/o.
				 *
				 *   - llite: kms, short read;
				 *   - llite: generic_file_read();
				 */
				result = cl_io_start(env, io);
				/*
				 * Send any remaining pending io, etc.
				 *
				 *   - llite: ll_rw_stats_tally.
				 */
				cl_io_end(env, io);
				cl_io_unlock(env, io);
				cl_io_rw_advance(env, io, io->ci_nob - nob);
			}
		}
		cl_io_iter_fini(env, io);
	} while (result == 0 && io->ci_continue);
	if (result == 0)
		result = io->ci_result;
	return result < 0 ? result : 0;
}
EXPORT_SYMBOL(cl_io_loop);

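/*
 * Illustrative sketch (assumption, not taken from this file): the usual
 * top-level calling convention pairs cl_io_init() (or cl_io_rw_init()) with
 * cl_io_loop() and always finishes with cl_io_fini(), whatever cl_io_init()
 * returned; CIT_SETATTR is just one example of a loopable io type.
 *
 *	io->ci_obj = obj;
 *	result = cl_io_init(env, io, CIT_SETATTR, obj);
 *	if (result == 0)
 *		result = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 */
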
/**
 * Adds io slice to the cl_io.
 *
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
 */
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
		     struct cl_object *obj,
		     const struct cl_io_operations *ops)
{
	struct list_head *linkage = &slice->cis_linkage;

	LASSERT((!linkage->prev && !linkage->next) ||
		list_empty(linkage));

	list_add_tail(linkage, &io->ci_layers);
	slice->cis_io = io;
	slice->cis_obj = obj;
	slice->cis_iop = ops;
}
EXPORT_SYMBOL(cl_io_slice_add);

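/*
 * Illustrative sketch (assumption, not taken from this file): a layer's
 * cl_object_operations::coo_io_init() method typically embeds a cl_io_slice
 * in its per-layer io state and registers it with cl_io_slice_add(). The
 * names "bar_io", "bar_io_ops" and "bar_env_io()" below are hypothetical.
 *
 *	struct bar_io {
 *		struct cl_io_slice bis_cl;
 *	};
 *
 *	static int bar_io_init(const struct lu_env *env,
 *			       struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct bar_io *bio = bar_env_io(env);
 *
 *		cl_io_slice_add(io, &bio->bis_cl, obj, &bar_io_ops);
 *		return 0;
 *	}
 */
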
/**
 * Initializes page list.
 */
void cl_page_list_init(struct cl_page_list *plist)
{
	plist->pl_nr = 0;
	INIT_LIST_HEAD(&plist->pl_pages);
	plist->pl_owner = current;
}
EXPORT_SYMBOL(cl_page_list_init);

/**
 * Adds a page to a page list.
 */
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
	/* it would be better to check that page is owned by "current" io, but
	 * it is not passed here.
	 */
	LASSERT(page->cp_owner);
	LINVRNT(plist->pl_owner == current);

	mutex_lock(&page->cp_mutex);

	LASSERT(list_empty(&page->cp_batch));
	list_add_tail(&page->cp_batch, &plist->pl_pages);
	++plist->pl_nr;
	lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
	cl_page_get(page);
}
EXPORT_SYMBOL(cl_page_list_add);

/**
 * Removes a page from a page list.
 */
void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
		      struct cl_page *page)
{
	LASSERT(plist->pl_nr > 0);
	LINVRNT(plist->pl_owner == current);

	list_del_init(&page->cp_batch);
	mutex_unlock(&page->cp_mutex);
	--plist->pl_nr;
	lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
	cl_page_put(env, page);
}
EXPORT_SYMBOL(cl_page_list_del);

/**
 * Moves a page from one page list to another.
 */
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
		       struct cl_page *page)
{
	LASSERT(src->pl_nr > 0);
	LINVRNT(dst->pl_owner == current);
	LINVRNT(src->pl_owner == current);

	list_move_tail(&page->cp_batch, &dst->pl_pages);
	--src->pl_nr;
	++dst->pl_nr;
	lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
		      src, dst);
}
EXPORT_SYMBOL(cl_page_list_move);

/**
 * Moves a page from one page list to the head of another list.
 */
void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
			    struct cl_page *page)
{
	LASSERT(src->pl_nr > 0);
	LINVRNT(dst->pl_owner == current);
	LINVRNT(src->pl_owner == current);

	list_move(&page->cp_batch, &dst->pl_pages);
	--src->pl_nr;
	++dst->pl_nr;
	lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
		      src, dst);
}
EXPORT_SYMBOL(cl_page_list_move_head);

/**
 * Splice the cl_page_list, just as a list head splice does.
 */
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
{
	struct cl_page *page;
	struct cl_page *tmp;

	LINVRNT(list->pl_owner == current);
	LINVRNT(head->pl_owner == current);

	cl_page_list_for_each_safe(page, tmp, list)
		cl_page_list_move(head, list, page);
}
EXPORT_SYMBOL(cl_page_list_splice);

void cl_page_disown0(const struct lu_env *env,
		     struct cl_io *io, struct cl_page *pg);

/**
 * Disowns pages in a queue.
 */
void cl_page_list_disown(const struct lu_env *env,
			 struct cl_io *io, struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each_safe(page, temp, plist) {
		LASSERT(plist->pl_nr > 0);

		list_del_init(&page->cp_batch);
		mutex_unlock(&page->cp_mutex);
		--plist->pl_nr;
		/*
		 * cl_page_disown0 rather than usual cl_page_disown() is used,
		 * because pages are possibly in CPS_FREEING state already due
		 * to the call to cl_page_list_discard().
		 */
		/*
		 * XXX cl_page_disown0() will fail if page is not locked.
		 */
		cl_page_disown0(env, io, page);
		lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
			      plist);
		cl_page_put(env, page);
	}
}
EXPORT_SYMBOL(cl_page_list_disown);

/**
 * Releases pages from queue.
 */
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each_safe(page, temp, plist)
		cl_page_list_del(env, plist, page);
	LASSERT(plist->pl_nr == 0);
}
EXPORT_SYMBOL(cl_page_list_fini);

/**
 * Assumes all pages in a queue.
 */
static void cl_page_list_assume(const struct lu_env *env,
				struct cl_io *io, struct cl_page_list *plist)
{
	struct cl_page *page;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each(page, plist)
		cl_page_assume(env, io, page);
}

/**
 * Discards all pages in a queue.
 */
static void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
				 struct cl_page_list *plist)
{
	struct cl_page *page;

	LINVRNT(plist->pl_owner == current);
	cl_page_list_for_each(page, plist)
		cl_page_discard(env, io, page);
}

/**
 * Initialize dual page queue.
 */
void cl_2queue_init(struct cl_2queue *queue)
{
	cl_page_list_init(&queue->c2_qin);
	cl_page_list_init(&queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_init);

/**
 * Disown pages in both lists of a 2-queue.
 */
void cl_2queue_disown(const struct lu_env *env,
		      struct cl_io *io, struct cl_2queue *queue)
{
	cl_page_list_disown(env, io, &queue->c2_qin);
	cl_page_list_disown(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_disown);

/**
 * Discard (truncate) pages in both lists of a 2-queue.
 */
void cl_2queue_discard(const struct lu_env *env,
		       struct cl_io *io, struct cl_2queue *queue)
{
	cl_page_list_discard(env, io, &queue->c2_qin);
	cl_page_list_discard(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_discard);

/**
 * Finalize both page lists of a 2-queue.
 */
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
	cl_page_list_fini(env, &queue->c2_qout);
	cl_page_list_fini(env, &queue->c2_qin);
}
EXPORT_SYMBOL(cl_2queue_fini);

/**
 * Initialize a 2-queue to contain \a page in its incoming page list.
 */
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
	cl_2queue_init(queue);
	/*
	 * Add a page to the incoming page list of 2-queue.
	 */
	cl_page_list_add(&queue->c2_qin, page);
}
EXPORT_SYMBOL(cl_2queue_init_page);

/**
 * Returns top-level io.
 *
 * \see cl_object_top()
 */
struct cl_io *cl_io_top(struct cl_io *io)
{
	while (io->ci_parent)
		io = io->ci_parent;
	return io;
}
EXPORT_SYMBOL(cl_io_top);

/**
 * Adds request slice to the compound request.
 *
 * This is called by cl_device_operations::cdo_req_init() methods to add a
 * per-layer state to the request. New state is added at the end of
 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
		      struct cl_device *dev,
		      const struct cl_req_operations *ops)
{
	list_add_tail(&slice->crs_linkage, &req->crq_layers);
	slice->crs_dev = dev;
	slice->crs_ops = ops;
	slice->crs_req = req;
}
EXPORT_SYMBOL(cl_req_slice_add);

static void cl_req_free(const struct lu_env *env, struct cl_req *req)
{
	unsigned int i;

	LASSERT(list_empty(&req->crq_pages));
	LASSERT(req->crq_nrpages == 0);
	LINVRNT(list_empty(&req->crq_layers));
	LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));

	if (req->crq_o) {
		for (i = 0; i < req->crq_nrobjs; ++i) {
			struct cl_object *obj = req->crq_o[i].ro_obj;

			if (obj) {
				lu_object_ref_del_at(&obj->co_lu,
						     &req->crq_o[i].ro_obj_ref,
						     "cl_req", req);
				cl_object_put(env, obj);
			}
		}
		kfree(req->crq_o);
	}
	kfree(req);
}

static int cl_req_init(const struct lu_env *env, struct cl_req *req,
		       struct cl_page *page)
{
	struct cl_device *dev;
	struct cl_page_slice *slice;
	int result;

	result = 0;
	list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
		dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
		if (dev->cd_ops->cdo_req_init) {
			result = dev->cd_ops->cdo_req_init(env, dev, req);
			if (result != 0)
				break;
		}
	}
	return result;
}

/**
 * Invokes per-request transfer completion call-backs
 * (cl_req_operations::cro_completion()) bottom-to-top.
 */
void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
{
	struct cl_req_slice *slice;

	/*
	 * for the lack of list_for_each_entry_reverse_safe()...
	 */
	while (!list_empty(&req->crq_layers)) {
		slice = list_entry(req->crq_layers.prev,
				   struct cl_req_slice, crs_linkage);
		list_del_init(&slice->crs_linkage);
		if (slice->crs_ops->cro_completion)
			slice->crs_ops->cro_completion(env, slice, rc);
	}
	cl_req_free(env, req);
}
EXPORT_SYMBOL(cl_req_completion);

/**
 * Allocates new transfer request.
 */
struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
			    enum cl_req_type crt, int nr_objects)
{
	struct cl_req *req;

	LINVRNT(nr_objects > 0);

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (req) {
		int result;

		req->crq_type = crt;
		INIT_LIST_HEAD(&req->crq_pages);
		INIT_LIST_HEAD(&req->crq_layers);

		req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
				     GFP_NOFS);
		if (req->crq_o) {
			req->crq_nrobjs = nr_objects;
			result = cl_req_init(env, req, page);
		} else {
			result = -ENOMEM;
		}
		if (result != 0) {
			cl_req_completion(env, req, result);
			req = ERR_PTR(result);
		}
	} else {
		req = ERR_PTR(-ENOMEM);
	}
	return req;
}
EXPORT_SYMBOL(cl_req_alloc);

/**
 * Adds a page to a request.
 */
void cl_req_page_add(const struct lu_env *env,
		     struct cl_req *req, struct cl_page *page)
{
	struct cl_object *obj;
	struct cl_req_obj *rqo;
	unsigned int i;

	LASSERT(list_empty(&page->cp_flight));
	LASSERT(!page->cp_req);

	CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
		      req, req->crq_type, req->crq_nrpages);

	list_add_tail(&page->cp_flight, &req->crq_pages);
	++req->crq_nrpages;
	page->cp_req = req;
	obj = cl_object_top(page->cp_obj);
	for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
		if (!rqo->ro_obj) {
			rqo->ro_obj = obj;
			cl_object_get(obj);
			lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
					     "cl_req", req);
			break;
		}
	}
	LASSERT(i < req->crq_nrobjs);
}
EXPORT_SYMBOL(cl_req_page_add);

/**
 * Removes a page from a request.
 */
void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
	struct cl_req *req = page->cp_req;

	LASSERT(!list_empty(&page->cp_flight));
	LASSERT(req->crq_nrpages > 0);

	list_del_init(&page->cp_flight);
	--req->crq_nrpages;
	page->cp_req = NULL;
}
EXPORT_SYMBOL(cl_req_page_done);

/**
 * Notifies layers that request is about to depart by calling
 * cl_req_operations::cro_prep() top-to-bottom.
 */
int cl_req_prep(const struct lu_env *env, struct cl_req *req)
{
	unsigned int i;
	int result;
	const struct cl_req_slice *slice;

	/*
	 * Check that the caller of cl_req_alloc() didn't lie about the number
	 * of objects.
	 */
	for (i = 0; i < req->crq_nrobjs; ++i)
		LASSERT(req->crq_o[i].ro_obj);

	result = 0;
	list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
		if (slice->crs_ops->cro_prep) {
			result = slice->crs_ops->cro_prep(env, slice);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_req_prep);

/**
 * Fills in attributes that are passed to server together with transfer. Only
 * attributes from \a flags may be touched. This can be called multiple times
 * for the same request.
 */
void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
		     struct cl_req_attr *attr, u64 flags)
{
	const struct cl_req_slice *slice;
	struct cl_page *page;
	unsigned int i;

	LASSERT(!list_empty(&req->crq_pages));

	/* Take any page to use as a model. */
	page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);

	for (i = 0; i < req->crq_nrobjs; ++i) {
		list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
			const struct cl_page_slice *scan;
			const struct cl_object *obj;

			scan = cl_page_at(page,
					  slice->crs_dev->cd_lu_dev.ld_type);
			obj = scan->cpl_obj;
			if (slice->crs_ops->cro_attr_set)
				slice->crs_ops->cro_attr_set(env, slice, obj,
							     attr + i, flags);
		}
	}
}
EXPORT_SYMBOL(cl_req_attr_set);

/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
 * wait for the IO to finish.
 */
void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
{
	wake_up_all(&anchor->csi_waitq);

	/* it's safe to nuke or reuse anchor now */
	atomic_set(&anchor->csi_barrier, 0);
}
EXPORT_SYMBOL(cl_sync_io_end);

/**
 * Initialize synchronous io wait anchor
 */
void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
		     void (*end)(const struct lu_env *, struct cl_sync_io *))
{
	memset(anchor, 0, sizeof(*anchor));
	init_waitqueue_head(&anchor->csi_waitq);
	atomic_set(&anchor->csi_sync_nr, nr);
	atomic_set(&anchor->csi_barrier, nr > 0);
	anchor->csi_sync_rc = 0;
	anchor->csi_end_io = end;
}
EXPORT_SYMBOL(cl_sync_io_init);

/**
 * Wait until all IO completes. Transfer completion routine has to call
 * cl_sync_io_note() for every entity.
 */
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
		    long timeout)
{
	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
						  NULL, NULL, NULL);
	int rc;

	LASSERT(timeout >= 0);

	rc = l_wait_event(anchor->csi_waitq,
			  atomic_read(&anchor->csi_sync_nr) == 0,
			  &lwi);
	if (rc < 0) {
		CERROR("IO failed: %d, still wait for %d remaining entries\n",
		       rc, atomic_read(&anchor->csi_sync_nr));

		lwi = (struct l_wait_info) { 0 };
		(void)l_wait_event(anchor->csi_waitq,
				   atomic_read(&anchor->csi_sync_nr) == 0,
				   &lwi);
	} else {
		rc = anchor->csi_sync_rc;
	}
	LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);

	/* wait until cl_sync_io_note() has done wakeup */
	while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
		cpu_relax();
	}
	return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);

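/*
 * Illustrative sketch (assumption, not taken from this file): the anchor
 * protocol used above is "init once, one cl_sync_io_note() per page from the
 * completion path, one cl_sync_io_wait() in the submitter", e.g.:
 *
 *	cl_sync_io_init(anchor, nr_pages, cl_sync_io_end);
 *	// submit nr_pages pages, each carrying cp_sync_io = anchor
 *	rc = cl_sync_io_wait(env, anchor, timeout);
 *
 * cl_io_submit_sync() earlier in this file is the in-tree instance of this
 * pattern.
 */
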
/**
 * Indicate that transfer of a single page completed.
 */
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
		     int ioret)
{
	if (anchor->csi_sync_rc == 0 && ioret < 0)
		anchor->csi_sync_rc = ioret;
	/*
	 * Synchronous IO done without releasing page lock (e.g., as a part of
	 * ->{prepare,commit}_write()). Completion is used to signal the end of
	 * IO.
	 */
	LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
	if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
		LASSERT(anchor->csi_end_io);
		anchor->csi_end_io(env, anchor);
		/* Can't access anchor any more */
	}
}
EXPORT_SYMBOL(cl_sync_io_note);