/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/*****************************************************************************
 *
 * Type conversions.
 *
 */

static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
{
	LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
	return container_of0(slice, struct osc_req, or_cl);
}

static struct osc_io *cl2osc_io(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);

	LINVRNT(oio == osc_env_io(env));
	return oio;
}

static struct osc_page *osc_cl_page_osc(struct cl_page *page,
					struct osc_object *osc)
{
	const struct cl_page_slice *slice;

	if (osc)
		slice = cl_object_page_slice(&osc->oo_cl, page);
	else
		slice = cl_page_at(page, &osc_device_type);

	LASSERT(slice);
	return cl2osc_page(slice);
}

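/*
 * Usage note (a sketch, not from the original source): both branches in
 * osc_cl_page_osc() resolve the same osc slice of a cl_page; passing a
 * known osc_object is simply the cheaper lookup, e.g.:
 *
 *	opg = osc_cl_page_osc(page, osc);	// direct slice lookup
 *	opg = osc_cl_page_osc(page, NULL);	// scan by &osc_device_type
 *
 * Callers below use the NULL form only when no osc_object is at hand
 * (see osc_req_attr_set()).
 */
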
/*****************************************************************************
 *
 * io operations.
 *
 */

static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}

/**
 * An implementation of cl_io_operations::cio_submit() method for the osc
 * layer. Iterates over pages in the in-queue, prepares each for io by
 * calling cl_page_prep() and then submits it through osc_page_submit(),
 * batching the pages into osc_queue_sync_pages() RPCs of at most
 * cl_max_pages_per_rpc pages each.
 */
static int osc_io_submit(const struct lu_env *env,
			 const struct cl_io_slice *ios,
			 enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page *page;
	struct cl_page *tmp;
	struct client_obd *cli = NULL;
	struct osc_object *osc = NULL; /* to keep gcc happy */
	struct osc_page *opg;
	struct cl_io *io;
	LIST_HEAD(list);

	struct cl_page_list *qin = &queue->c2_qin;
	struct cl_page_list *qout = &queue->c2_qout;
	unsigned int queued = 0;
	int result = 0;
	int cmd;
	int brw_flags;
	unsigned int max_pages;

	LASSERT(qin->pl_nr > 0);

	CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt);

	osc = cl2osc(ios->cis_obj);
	cli = osc_cli(osc);
	max_pages = cli->cl_max_pages_per_rpc;

	cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;

	/*
	 * NOTE: here @page is a top-level page. This is done to avoid
	 *	 creation of sub-page-list.
	 */
	cl_page_list_for_each_safe(page, tmp, qin) {
		struct osc_async_page *oap;

		/* Top level IO. */
		io = page->cp_owner;
		LASSERT(io);

		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;
		LASSERT(osc == oap->oap_obj);

		if (!list_empty(&oap->oap_pending_item) ||
		    !list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		result = cl_page_prep(env, io, page, crt);
		if (result != 0) {
			LASSERT(result < 0);
			if (result != -EALREADY)
				break;
			/*
			 * Handle -EALREADY error: for read case, the page is
			 * already in UPTODATE state; for write, the page
			 * is not dirty.
			 */
			result = 0;
			continue;
		}

		cl_page_list_move(qout, qin, page);
		spin_lock(&oap->oap_lock);
		oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
		oap->oap_async_flags |= ASYNC_COUNT_STABLE;
		spin_unlock(&oap->oap_lock);

		osc_page_submit(env, opg, crt, brw_flags);
		list_add_tail(&oap->oap_pending_item, &list);
		if (++queued == max_pages) {
			queued = 0;
			result = osc_queue_sync_pages(env, osc, &list, cmd,
						      brw_flags);
			if (result < 0)
				break;
		}
	}

	if (queued > 0)
		result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);

	CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
	return qout->pl_nr > 0 ? 0 : result;
}

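/*
 * Worked example (illustrative arithmetic, not from the original source):
 * with cl_max_pages_per_rpc == 256, submitting a 1000-page in-queue
 * through the loop above issues three full osc_queue_sync_pages() batches
 * of 256 pages from inside the loop, and the final 232 pages go out via
 * the trailing "if (queued > 0)" flush.
 */
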
/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one was missing (i.e., if there was a hole at that place in
 * the file, or if the accessed page is beyond the current file size).
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch_at(const struct lu_env *env,
			      struct cl_object *obj, pgoff_t idx, unsigned to)
{
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	/*
	 * XXX old code used
	 *
	 *	 ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
	 *
	 * here
	 */
	CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	attr->cat_ctime = LTIME_S(CURRENT_TIME);
	attr->cat_mtime = attr->cat_ctime;
	valid = CAT_MTIME | CAT_CTIME;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}

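/*
 * Worked example (illustrative, assuming 4 KiB pages): a write covering
 * bytes [0, 100) of page index 3 calls this with to == 100, so
 *
 *	kms = cl_offset(obj, 3) + 100 = 3 * 4096 + 100 = 12388
 *
 * and both KMS and the apparent size grow only if they were below 12388.
 */
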
static int osc_io_commit_async(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       struct cl_page_list *qin, int from, int to,
			       cl_commit_cbt cb)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = cl2osc_io(env, ios);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct cl_page *page;
	struct cl_page *last_page;
	struct osc_page *opg;
	int result = 0;

	LASSERT(qin->pl_nr > 0);

	/* Handle partial page cases */
	last_page = cl_page_list_last(qin);
	if (oio->oi_lockless) {
		page = cl_page_list_first(qin);
		if (page == last_page) {
			cl_page_clip(env, page, from, to);
		} else {
			if (from != 0)
				cl_page_clip(env, page, from, PAGE_SIZE);
			if (to != PAGE_SIZE)
				cl_page_clip(env, last_page, 0, to);
		}
	}

	while (qin->pl_nr > 0) {
		struct osc_async_page *oap;

		page = cl_page_list_first(qin);
		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;

		if (!list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		/* The page may be already in dirty cache. */
		if (list_empty(&oap->oap_pending_item)) {
			result = osc_page_cache_add(env, &opg->ops_cl, io);
			if (result != 0)
				break;
		}

		osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
				  page == last_page ? to : PAGE_SIZE);

		cl_page_list_del(env, qin, page);

		(*cb)(env, io, page);
		/* Can't access page any more. Page can be in transfer and
		 * complete at any time.
		 */
	}

	/* for sync write, kernel will wait for this page to be flushed before
	 * osc_io_end() is called, so release it earlier.
	 * for mkwrite(), it's known there are no further pages.
	 */
	if (cl_io_is_sync_write(io) && oio->oi_active) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}

	CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
	return result;
}

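/*
 * Worked example (illustrative, assuming 4 KiB pages): a lockless commit
 * of bytes with from == 100 and to == 200 over pages P0..P2 clips only
 * the edge pages: P0 to [100, 4096) and P2 to [0, 200). Interior pages
 * stay whole, which is why only cl_page_list_first() and
 * cl_page_list_last() are clipped above.
 */
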
static int osc_io_rw_iter_init(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct client_obd *cli = osc_cli(osc);
	unsigned long c;
	unsigned int npages;
	unsigned int max_pages;

	if (cl_io_is_append(io))
		return 0;

	npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
	if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
		++npages;

	max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
	if (npages > max_pages)
		npages = max_pages;

	c = atomic_read(cli->cl_lru_left);
	if (c < npages && osc_lru_reclaim(cli) > 0)
		c = atomic_read(cli->cl_lru_left);
	while (c >= npages) {
		if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
			oio->oi_lru_reserved = npages;
			break;
		}
		c = atomic_read(cli->cl_lru_left);
	}

	return 0;
}

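/*
 * The reservation loop above is the usual lock-free "take N from a
 * budget" pattern on an atomic counter. A minimal generic sketch
 * (hypothetical helper, not part of this file):
 *
 *	static bool take_budget(atomic_t *left, int want)
 *	{
 *		int c = atomic_read(left);
 *
 *		while (c >= want) {
 *			if (atomic_cmpxchg(left, c, c - want) == c)
 *				return true;	// reserved @want units
 *			c = atomic_read(left);	// raced; re-sample and retry
 *		}
 *		return false;	// not enough budget; caller goes without
 *	}
 */
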
static void osc_io_rw_iter_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct client_obd *cli = osc_cli(osc);

	if (oio->oi_lru_reserved > 0) {
		atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
		oio->oi_lru_reserved = 0;
	}
	oio->oi_write_osclock = NULL;
}

static int osc_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_io *io;
	struct cl_fault_io *fio;

	io = ios->cis_io;
	fio = &io->u.ci_fault;
	CDEBUG(D_INFO, "%lu %d %d\n",
	       fio->ft_index, fio->ft_writable, fio->ft_nob);
	/*
	 * If mapping is writeable, adjust kms to cover this page,
	 * but do not extend kms beyond actual file size.
	 */
	if (fio->ft_writable)
		osc_page_touch_at(env, ios->cis_obj,
				  fio->ft_index, fio->ft_nob);
	return 0;
}

static int osc_async_upcall(void *a, int rc)
{
	struct osc_async_cbargs *args = a;

	args->opc_rc = rc;
	complete(&args->opc_sync);
	return 0;
}

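/*
 * How the upcall pairs with its waiter (a sketch using names from this
 * file; see osc_io_setattr_start()/osc_io_setattr_end() below for the
 * real sequence):
 *
 *	init_completion(&cbargs->opc_sync);
 *	rc = osc_punch_base(exp, &oinfo, osc_async_upcall, cbargs,
 *			    PTLRPCD_SET);
 *	cbargs->opc_rpc_sent = rc == 0;
 *	...
 *	if (cbargs->opc_rpc_sent) {
 *		wait_for_completion(&cbargs->opc_sync);
 *		rc = cbargs->opc_rc;	// filled in by osc_async_upcall()
 *	}
 */
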
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
			  struct osc_page *ops, void *cbdata)
{
	struct cl_page *page = ops->ops_cl.cpl_page;
	struct osc_async_page *oap;
	__u64 start = *(__u64 *)cbdata;

	oap = &ops->ops_oap;
	if (oap->oap_cmd & OBD_BRW_WRITE &&
	    !list_empty(&oap->oap_pending_item))
		CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
			      start, current->comm);

	if (PageLocked(page->cp_vmpage))
		CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
		       ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);

	return CLP_GANG_OKAY;
}

static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *clob;
	int partial;
	pgoff_t start;

	clob = oio->oi_cl.cis_obj;
	start = cl_index(clob, size);
	partial = cl_offset(clob, start) < size;

	/*
	 * Complain if there are pages in the truncated region.
	 */
	osc_page_gang_lookup(env, io, cl2osc(clob),
			     start + partial, CL_PAGE_EOF,
			     trunc_check_cb, (void *)&size);
}

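/*
 * Worked example (illustrative, assuming 4 KiB pages): truncating to
 * size 12288 gives start == 3 and partial == 0, so pages [3, EOF] are
 * checked; truncating to 12289 gives partial == 1 and the check starts
 * at page 4, since page 3 then legitimately keeps its head in use.
 */
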
static int osc_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
	unsigned int ia_valid = io->u.ci_setattr.sa_valid;
	int result = 0;
	struct obd_info oinfo = { };

	/* truncate cache dirty pages first */
	if (cl_io_is_trunc(io))
		result = osc_cache_truncate_start(env, oio, cl2osc(obj), size);

	if (result == 0 && oio->oi_lockless == 0) {
		cl_object_attr_lock(obj);
		result = cl_object_attr_get(env, obj, attr);
		if (result == 0) {
			struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
			unsigned int cl_valid = 0;

			if (ia_valid & ATTR_SIZE) {
				attr->cat_size = size;
				attr->cat_kms = size;
				cl_valid = CAT_SIZE | CAT_KMS;
			}
			if (ia_valid & ATTR_MTIME_SET) {
				attr->cat_mtime = lvb->lvb_mtime;
				cl_valid |= CAT_MTIME;
			}
			if (ia_valid & ATTR_ATIME_SET) {
				attr->cat_atime = lvb->lvb_atime;
				cl_valid |= CAT_ATIME;
			}
			if (ia_valid & ATTR_CTIME_SET) {
				attr->cat_ctime = lvb->lvb_ctime;
				cl_valid |= CAT_CTIME;
			}
			result = cl_object_attr_set(env, obj, attr, cl_valid);
		}
		cl_object_attr_unlock(obj);
	}
	memset(oa, 0, sizeof(*oa));
	if (result == 0) {
		oa->o_oi = loi->loi_oi;
		oa->o_mtime = attr->cat_mtime;
		oa->o_atime = attr->cat_atime;
		oa->o_ctime = attr->cat_ctime;
		oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
			      OBD_MD_FLCTIME | OBD_MD_FLMTIME;
		if (ia_valid & ATTR_SIZE) {
			oa->o_size = size;
			oa->o_blocks = OBD_OBJECT_EOF;
			oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

			if (oio->oi_lockless) {
				oa->o_flags = OBD_FL_SRVLOCK;
				oa->o_valid |= OBD_MD_FLFLAGS;
			}
		} else {
			LASSERT(oio->oi_lockless == 0);
		}

		oinfo.oi_oa = oa;
		init_completion(&cbargs->opc_sync);

		if (ia_valid & ATTR_SIZE)
			result = osc_punch_base(osc_export(cl2osc(obj)),
						&oinfo, osc_async_upcall,
						cbargs, PTLRPCD_SET);
		else
			result = osc_setattr_async_base(osc_export(cl2osc(obj)),
							&oinfo, NULL,
							osc_async_upcall,
							cbargs, PTLRPCD_SET);
		cbargs->opc_rpc_sent = result == 0;
	}

	return result;
}

static void osc_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int result = 0;

	if (cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		result = cbargs->opc_rc;
		io->ci_result = cbargs->opc_rc;
	}
	if (result == 0) {
		if (oio->oi_lockless) {
			/* lockless truncate */
			struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

			LASSERT(cl_io_is_trunc(io));
			/* XXX: Need a lock. */
			osd->od_stats.os_lockless_truncates++;
		}
	}

	if (cl_io_is_trunc(io)) {
		__u64 size = io->u.ci_setattr.sa_attr.lvb_size;

		osc_trunc_check(env, io, oio, size);
		if (oio->oi_trunc) {
			osc_cache_truncate_end(env, oio, cl2osc(obj));
			oio->oi_trunc = NULL;
		}
	}
}

static int osc_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *slice)
{
	struct cl_object *obj = slice->cis_obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;

	if (!slice->cis_io->ci_noatime) {
		cl_object_attr_lock(obj);
		attr->cat_atime = ktime_get_real_seconds();
		rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
		cl_object_attr_unlock(obj);
	}

	return rc;
}

static int osc_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *slice)
{
	struct cl_object *obj = slice->cis_obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
	cl_object_attr_lock(obj);
	attr->cat_ctime = ktime_get_real_seconds();
	attr->cat_mtime = attr->cat_ctime;
	rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
	cl_object_attr_unlock(obj);

	return rc;
}

static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
			 struct cl_fsync_io *fio)
{
	struct osc_io *oio = osc_env_io(env);
	struct obdo *oa = &oio->oi_oa;
	struct obd_info *oinfo = &oio->oi_info;
	struct lov_oinfo *loi = obj->oo_oinfo;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int rc = 0;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	/* reload size and blocks for start and end of sync range */
	oa->o_size = fio->fi_start;
	oa->o_blocks = fio->fi_end;
	oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

	obdo_set_parent_fid(oa, fio->fi_fid);

	memset(oinfo, 0, sizeof(*oinfo));
	oinfo->oi_oa = oa;
	init_completion(&cbargs->opc_sync);

	rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
			   PTLRPCD_SET);
	return rc;
}

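/*
 * Wire-format note (illustrative values, not from the original source):
 * OST_SYNC reuses obdo fields to carry the byte range, so an fsync of
 * bytes [4096, 65536) sends o_size == 4096 and o_blocks == 65536 with
 * OBD_MD_FLSIZE | OBD_MD_FLBLOCKS set, and the OST syncs that extent.
 */
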
static int osc_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct cl_fsync_io *fio = &io->u.ci_fsync;
	struct cl_object *obj = slice->cis_obj;
	struct osc_object *osc = cl2osc(obj);
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end = cl_index(obj, fio->fi_end);
	int result = 0;

	if (fio->fi_end == OBD_OBJECT_EOF)
		end = CL_PAGE_EOF;

	result = osc_cache_writeback_range(env, osc, start, end, 0,
					   fio->fi_mode == CL_FSYNC_DISCARD);
	if (result > 0) {
		fio->fi_nr_written += result;
		result = 0;
	}
	if (fio->fi_mode == CL_FSYNC_ALL) {
		int rc;

		/* we have to wait for writeback to finish before we can
		 * send OST_SYNC RPC. This is bad because it causes extents
		 * to be written osc by osc. However, we usually start
		 * writeback before CL_FSYNC_ALL so this won't cause a real
		 * problem.
		 */
		rc = osc_cache_wait_range(env, osc, start, end);
		if (result == 0)
			result = rc;
		rc = osc_fsync_ost(env, osc, fio);
		if (result == 0)
			result = rc;
	}

	return result;
}

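/*
 * Mode summary, as exercised by this function and osc_io_fsync_end()
 * below (inferred from how fi_mode is tested in this file):
 *
 *	CL_FSYNC_DISCARD - start writeback, discarding the pages;
 *	CL_FSYNC_LOCAL	 - start writeback, wait for it in fsync_end;
 *	CL_FSYNC_ALL	 - start writeback, wait, then OST_SYNC so the
 *			   server commits the range to stable storage.
 */
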
static void osc_io_fsync_end(const struct lu_env *env,
			     const struct cl_io_slice *slice)
{
	struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
	struct cl_object *obj = slice->cis_obj;
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end = cl_index(obj, fio->fi_end);
	int result = 0;

	if (fio->fi_mode == CL_FSYNC_LOCAL) {
		result = osc_cache_wait_range(env, cl2osc(obj), start, end);
	} else if (fio->fi_mode == CL_FSYNC_ALL) {
		struct osc_io *oio = cl2osc_io(env, slice);
		struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

		wait_for_completion(&cbargs->opc_sync);
		if (result == 0)
			result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}

static void osc_io_end(const struct lu_env *env,
		       const struct cl_io_slice *slice)
{
	struct osc_io *oio = cl2osc_io(env, slice);

	if (oio->oi_active) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}
}

static const struct cl_io_operations osc_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_start = osc_io_read_start,
			.cio_fini = osc_io_fini
		},
		[CIT_WRITE] = {
			.cio_iter_init = osc_io_rw_iter_init,
			.cio_iter_fini = osc_io_rw_iter_fini,
			.cio_start = osc_io_write_start,
			.cio_end = osc_io_end,
			.cio_fini = osc_io_fini
		},
		[CIT_SETATTR] = {
			.cio_start = osc_io_setattr_start,
			.cio_end = osc_io_setattr_end
		},
		[CIT_FAULT] = {
			.cio_start = osc_io_fault_start,
			.cio_end = osc_io_end,
			.cio_fini = osc_io_fini
		},
		[CIT_FSYNC] = {
			.cio_start = osc_io_fsync_start,
			.cio_end = osc_io_fsync_end,
			.cio_fini = osc_io_fini
		},
		[CIT_MISC] = {
			.cio_fini = osc_io_fini
		}
	},
	.cio_submit = osc_io_submit,
	.cio_commit_async = osc_io_commit_async
};

/*****************************************************************************
 *
 * Transfer operations.
 *
 */

static int osc_req_prep(const struct lu_env *env,
			const struct cl_req_slice *slice)
{
	return 0;
}

static void osc_req_completion(const struct lu_env *env,
			       const struct cl_req_slice *slice, int ioret)
{
	struct osc_req *or;

	or = cl2osc_req(slice);
	kmem_cache_free(osc_req_kmem, or);
}

/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env,
			     const struct cl_req_slice *slice,
			     const struct cl_object *obj,
			     struct cl_req_attr *attr, u64 flags)
{
	struct lov_oinfo *oinfo;
	struct cl_req *clerq;
	struct cl_page *apage; /* _some_ page in @clerq */
	struct ldlm_lock *lock; /* _some_ lock protecting @apage */
	struct osc_page *opg;
	struct obdo *oa;
	struct ost_lvb *lvb;

	oinfo = cl2osc(obj)->oo_oinfo;
	lvb = &oinfo->loi_lvb;
	oa = attr->cra_oa;

	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		clerq = slice->crs_req;
		LASSERT(!list_empty(&clerq->crq_pages));
		apage = container_of(clerq->crq_pages.next,
				     struct cl_page, cp_flight);
		opg = osc_cl_page_osc(apage, NULL);
		lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
					    1, 1);
		if (!lock && !opg->ops_srvlock) {
			struct ldlm_resource *res;
			struct ldlm_res_id *resname;

			CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");

			resname = &osc_env_info(env)->oti_resname;
			ostid_build_res_name(&oinfo->loi_oi, resname);
			res = ldlm_resource_get(
				osc_export(cl2osc(obj))->exp_obd->obd_namespace,
				NULL, resname, LDLM_EXTENT, 0);
			ldlm_resource_dump(D_ERROR, res);

			dump_stack();
			LBUG();
		}

		/* check for lockless io. */
		if (lock) {
			oa->o_handle = lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
			LDLM_LOCK_PUT(lock);
		}
	}
}

static const struct cl_req_operations osc_req_ops = {
	.cro_prep = osc_req_prep,
	.cro_attr_set = osc_req_attr_set,
	.cro_completion = osc_req_completion
};

int osc_io_init(const struct lu_env *env,
		struct cl_object *obj, struct cl_io *io)
{
	struct osc_io *oio = osc_env_io(env);

	CL_IO_SLICE_CLEAN(oio, oi_cl);
	cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
	return 0;
}

int osc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req)
{
	struct osc_req *or;
	int result;

	or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS);
	if (or) {
		cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
		result = 0;
	} else {
		result = -ENOMEM;
	}
	return result;
}

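/*
 * Orientation (a sketch; the call sites live outside this file): the
 * upper layer wires this file in through the two initializers above
 * while assembling the cl_io/cl_req stacks, roughly:
 *
 *	result = osc_io_init(env, obj, io);	// adds the oi_cl io slice
 *	result = osc_req_init(env, dev, req);	// adds the or_cl req slice
 *
 * after which cl_io dispatches into osc_io_ops and cl_req into
 * osc_req_ops.
 */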