4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2010, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 /** \defgroup PtlRPC Portal RPC and networking module.
38 * PortalRPC is the layer used by the rest of the Lustre code to achieve network
39 * communication: establishing connections with corresponding export and import
40 * states, listening for a service, and sending and receiving RPCs.
41 * PortalRPC also includes the base recovery framework: packet resending and
42 * replaying, reconnections, and the pinger.
44 * PortalRPC utilizes LNet as its transport layer.
57 #include "../../include/linux/libcfs/libcfs.h"
58 #include "../../include/linux/lnet/nidstr.h"
59 #include "../../include/linux/lnet/api.h"
60 #include "lustre/lustre_idl.h"
61 #include "lustre_ha.h"
62 #include "lustre_sec.h"
63 #include "lustre_import.h"
64 #include "lprocfs_status.h"
65 #include "lu_object.h"
66 #include "lustre_req_layout.h"
68 #include "obd_support.h"
69 #include "lustre_ver.h"
71 /* MD flags we _always_ use */
72 #define PTLRPC_MD_OPTIONS 0
75 * Max # of bulk operations in one request.
76 * In order for the client and server to properly negotiate the maximum
77 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
78 * value. The client is free to limit the actual RPC size for any bulk
79 * transfer via cl_max_pages_per_rpc to some non-power-of-two value.
81 #define PTLRPC_BULK_OPS_BITS 2
82 #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
84 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
85 * should not be used on the server at all. Otherwise, it imposes a
86 * protocol limitation on the maximum RPC size that can be used by any
87 * RPC sent to that server in the future. Instead, the server should
88 * use the negotiated per-client ocd_brw_size to determine the bulk RPC count.
91 #define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
94 * Define maxima for bulk I/O.
96 * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
97 * LNET_MTU-sized RDMA transfers. Clients and servers negotiate the
98 * currently supported maximum between peers at connect time via ocd_brw_size.
100 #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
101 #define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
102 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
104 #define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
105 #define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
106 #define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
107 #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
108 #define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
109 #define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
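/*
 * Worked example of the arithmetic above, assuming LNET_MTU_BITS == 20
 * (a 1 MiB LNet MTU) and PAGE_SHIFT == 12 (4 KiB pages); both values are
 * assumptions for illustration only and are not guaranteed by this header:
 *
 *	PTLRPC_MAX_BRW_BITS  = 20 + 2      = 22
 *	PTLRPC_MAX_BRW_SIZE  = 1 << 22     = 4 MiB
 *	PTLRPC_MAX_BRW_PAGES = 4 MiB >> 12 = 1024 pages
 *	MD_MAX_BRW_SIZE      = 1 << 20     = 1 MiB per RDMA transfer
 */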
111 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
112 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
113 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
115 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
116 # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
118 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
119 # error "PTLRPC_MAX_BRW_SIZE too big"
121 # if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
122 # error "PTLRPC_MAX_BRW_PAGES too big"
125 #define PTLRPC_NTHRS_INIT 2
130 * Constants determine how memory is used to buffer incoming service requests.
132 * ?_NBUFS # buffers to allocate when growing the pool
133 * ?_BUFSIZE # bytes in a single request buffer
134 * ?_MAXREQSIZE # maximum request service will receive
136 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
137 * of ?_NBUFS is added to the pool.
139 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
140 * considered full when less than ?_MAXREQSIZE is left in them.
145 * These constants determine how threads are created for a ptlrpc service.
147 * ?_NTHRS_INIT # of threads to create for each service partition at
148 * initialization. For a non-affinity service with
149 * only one partition, it is the overall # of
150 * threads for the service while initializing.
151 * ?_NTHRS_BASE minimum # of threads that should be created for
152 * each ptlrpc partition to keep the service healthy.
153 * It is the low-water mark of the per-partition
154 * thread upper limit.
155 * ?_THR_FACTOR # of threads that can be added to the thread
156 * upper limit per CPU core. This factor is only a
157 * reference; we might decrease it if the number of
158 * cores per CPT is above a limit.
159 * ?_NTHRS_MAX overall # of threads that can be created for a
160 * service. It is a soft limit, because if the
161 * service runs on a machine with hundreds of cores
162 * and tens of CPU partitions, we need to guarantee
163 * that each partition has ?_NTHRS_BASE threads, so
164 * the total can be ?_NTHRS_BASE * number_of_cpts,
165 * which may exceed ?_NTHRS_MAX.
169 * #define MDS_NTHRS_INIT 2
170 * #define MDS_NTHRS_BASE 64
171 * #define MDS_NTHRS_FACTOR 8
172 * #define MDS_NTHRS_MAX 1024
175 * ---------------------------------------------------------------------
176 * Server(A) has 16 cores and the user configured it with 4 partitions, so each
177 * partition has 4 cores; the actual number of service threads on each partition is:
179 * MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
181 * Total number of threads for the service is:
182 * 96 * partitions(4) = 384
185 * ---------------------------------------------------------------------
186 * Server(B) has 32 cores and the user configured it with 4 partitions, so each
187 * partition has 8 cores; the actual number of service threads on each partition is:
189 * MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
191 * Total number of threads for the service is:
192 * 128 * partitions(4) = 512
195 * ---------------------------------------------------------------------
196 * Server(B) has 96 cores and the user configured it with 8 partitions, so each
197 * partition has 12 cores; the actual number of service threads on each partition is:
199 * MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
201 * Total number of threads for the service is:
202 * 160 * partitions(8) = 1280
204 * However, this is above the soft limit MDS_NTHRS_MAX, so we instead use this
205 * as the upper limit on the number of threads for each partition:
206 * MDS_NTHRS_MAX(1024) / partitions(8) = 128
209 * ---------------------------------------------------------------------
210 * Server(C) has a thousand cores and the user configured it with 32 partitions:
211 * MDS_NTHRS_BASE(64) * 32 = 2048
213 * which is already above the soft limit MDS_NTHRS_MAX(1024), but we still need
214 * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads
215 * to keep the service healthy, so the total number of threads will just be 2048.
217 * NB: we don't suggest choosing a server with that many cores, because the
218 * backend filesystem itself, the buffer cache, or the underlying network
219 * stack might have SMP scalability issues at that scale.
221 * If the user already has a fat machine with hundreds or thousands of cores,
222 * there are two configuration choices:
223 * a) create a CPU table from a subset of all CPUs and run Lustre on top of that subset;
225 * b) bind service threads to a few partitions; see the module parameters of
226 * the MDS and OSS for details.
228 * NB: these calculations (and the examples below) are simplified to aid
229 * understanding; the real implementation is a little more complex,
230 * see ptlrpc_server_nthreads_check() for details.
235 * LDLM thread constants:
237 * Given 8 as the factor and 24 as the base thread count:
240 * On a 4-core machine we will have 24 + 8 * 4 = 56 threads.
243 * On an 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
244 * threads for each partition, and the total number of threads will be 112.
247 * On a 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
248 * threads for each partition to keep the service healthy, so the total
249 * number of threads should be 24 * 8 = 192.
251 * So with these constants, the number of threads will be at a similar level
252 * to older versions, unless the target machine has over a hundred cores.
254 #define LDLM_THR_FACTOR 8
255 #define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT
256 #define LDLM_NTHRS_BASE 24
257 #define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128)
259 #define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
260 #define LDLM_CLIENT_NBUFS 1
261 #define LDLM_SERVER_NBUFS 64
262 #define LDLM_BUFSIZE (8 * 1024)
263 #define LDLM_MAXREQSIZE (5 * 1024)
264 #define LDLM_MAXREPSIZE (1024)
266 #define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
268 #define OST_MAXREQSIZE (5 * 1024)
270 /* Macro to hide a typecast. */
271 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
274 * Structure defining a single portal connection.
276 struct ptlrpc_connection {
277 /** linkage for connections hash table */
278 struct hlist_node c_hash;
279 /** Our own lnet nid for this connection */
281 /** Remote side nid for this connection */
282 lnet_process_id_t c_peer;
283 /** UUID of the other side */
284 struct obd_uuid c_remote_uuid;
285 /** reference counter for this connection */
289 /** Client definition for PortalRPC */
290 struct ptlrpc_client {
291 /** What lnet portal does this client send messages to by default */
292 __u32 cli_request_portal;
293 /** What portal do we expect replies on */
294 __u32 cli_reply_portal;
295 /** Name of the client */
299 /** state flags of requests */
300 /* XXX only ones left are those used by the bulk descs as well! */
301 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
302 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
304 #define REQ_MAX_ACK_LOCKS 8
306 union ptlrpc_async_args {
308 * Scratchpad for passing args to completion interpreter. Users
309 * cast to the struct of their choosing, and CLASSERT that this is
310 * big enough. For _tons_ of context, kmalloc a struct and store
311 * a pointer to it here. The pointer_arg ensures this struct is at
312 * least big enough for that.
314 void *pointer_arg[11];
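/*
 * A minimal usage sketch; the struct, variable and callback names below are
 * hypothetical, while CLASSERT() and the ptlrpc_req_async_args() macro are
 * defined elsewhere in this code base:
 *
 *	struct my_async_args {
 *		struct obd_device *aa_obd;
 *		int		   aa_flags;
 *	};
 *
 *	CLASSERT(sizeof(struct my_async_args) <=
 *		 sizeof(union ptlrpc_async_args));
 *
 *	struct my_async_args *aa = ptlrpc_req_async_args(req);
 *
 *	aa->aa_obd = obd;
 *	req->rq_interpret_reply = my_interpret_cb;
 */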
318 struct ptlrpc_request_set;
319 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
320 typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
323 * Definition of the request set structure.
324 * A request set is a list of requests (not necessarily to the same target)
325 * that, once populated with RPCs, could be sent in parallel.
326 * There are two kinds of request sets: general purpose, and those with a
327 * dedicated serving thread. An example of the latter is the ptlrpcd set.
328 * For general purpose sets, once the set has started sending it is impossible
329 * to add new requests to it.
330 * Provides a way to call "completion callbacks" when all requests in the set have completed.
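/*
 * Minimal usage sketch. ptlrpc_prep_set(), ptlrpc_set_add_req(),
 * ptlrpc_set_wait() and ptlrpc_set_destroy() are assumed from the PtlRPC
 * client API and are not declared in this excerpt; req1 and req2 stand for
 * requests prepared elsewhere:
 *
 *	struct ptlrpc_request_set *set;
 *
 *	set = ptlrpc_prep_set();
 *	if (!set)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);
 *	ptlrpc_set_destroy(set);
 *
 * ptlrpc_set_wait() sends every queued request and blocks until all of them
 * have completed.
 */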
333 struct ptlrpc_request_set {
334 atomic_t set_refcount;
335 /** number of in queue requests */
336 atomic_t set_new_count;
337 /** number of uncompleted requests */
338 atomic_t set_remaining;
339 /** wait queue to wait on for request events */
340 wait_queue_head_t set_waitq;
341 wait_queue_head_t *set_wakeup_ptr;
342 /** List of requests in the set */
343 struct list_head set_requests;
345 * List of completion callbacks to be called when the set is completed
346 * This is only used if \a set_interpret is NULL.
347 * Links struct ptlrpc_set_cbdata.
349 struct list_head set_cblist;
350 /** Completion callback, if only one. */
351 set_interpreter_func set_interpret;
352 /** opaq argument passed to completion \a set_interpret callback. */
355 * Lock for \a set_new_requests manipulations
356 * locked so that any old caller can communicate requests to
357 * the set holder who can then fold them into the lock-free set
359 spinlock_t set_new_req_lock;
360 /** List of new yet unsent requests. Only used with ptlrpcd now. */
361 struct list_head set_new_requests;
363 /** rq_status of requests that have been freed already */
365 /** Additional fields used by the flow control extension */
366 /** Maximum number of RPCs in flight */
367 int set_max_inflight;
368 /** Callback function used to generate RPCs */
369 set_producer_func set_producer;
370 /** opaq argument passed to the producer callback */
371 void *set_producer_arg;
375 * Description of a single ptlrpc_set callback
377 struct ptlrpc_set_cbdata {
378 /** List linkage item */
379 struct list_head psc_item;
380 /** Pointer to interpreting function */
381 set_interpreter_func psc_interpret;
382 /** Opaq argument to pass to the callback */
386 struct ptlrpc_bulk_desc;
387 struct ptlrpc_service_part;
388 struct ptlrpc_service;
391 * ptlrpc callback & work item stuff
393 struct ptlrpc_cb_id {
394 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
395 void *cbid_arg; /* additional arg */
398 /** Maximum number of locks to fit into reply state */
399 #define RS_MAX_LOCKS 8
403 * Structure to define reply state on the server
404 * Reply state holds various reply message information. Also, for "difficult"
405 * replies (the rep-ack case) we store the state after sending the reply and wait
406 * for the client to acknowledge its reception. In these cases locks could be
407 * added to the state for replay/failover consistency guarantees.
409 struct ptlrpc_reply_state {
410 /** Callback description */
411 struct ptlrpc_cb_id rs_cb_id;
412 /** Linkage for list of all reply states in a system */
413 struct list_head rs_list;
414 /** Linkage for list of all reply states on same export */
415 struct list_head rs_exp_list;
416 /** Linkage for list of all reply states for same obd */
417 struct list_head rs_obd_list;
419 struct list_head rs_debug_list;
421 /** A spinlock to protect the reply state flags */
423 /** Reply state flags */
424 unsigned long rs_difficult:1; /* ACK/commit stuff */
425 unsigned long rs_no_ack:1; /* no ACK, even for
428 unsigned long rs_scheduled:1; /* being handled? */
429 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
430 unsigned long rs_handled:1; /* been handled yet? */
431 unsigned long rs_on_net:1; /* reply_out_callback pending? */
432 unsigned long rs_prealloc:1; /* rs from prealloc list */
433 unsigned long rs_committed:1;/* the transaction was committed
434 * and the rs was dispatched
436 /** Size of the state */
440 /** Transaction number */
444 struct obd_export *rs_export;
445 struct ptlrpc_service_part *rs_svcpt;
446 /** Lnet metadata handle for the reply */
447 lnet_handle_md_t rs_md_h;
448 atomic_t rs_refcount;
450 /** Context for the service thread */
451 struct ptlrpc_svc_ctx *rs_svc_ctx;
452 /** Reply buffer (actually sent to the client), encoded if needed */
453 struct lustre_msg *rs_repbuf; /* wrapper */
454 /** Size of the reply buffer */
455 int rs_repbuf_len; /* wrapper buf length */
456 /** Size of the reply message */
457 int rs_repdata_len; /* wrapper msg length */
459 * Actual reply message. Its content is encrypted (if needed) to
460 * produce reply buffer for actual sending. In simple case
461 * of no network encryption we just set \a rs_repbuf to \a rs_msg
463 struct lustre_msg *rs_msg; /* reply message */
465 /** Number of locks awaiting client ACK */
467 /** Handles of locks awaiting client reply ACK */
468 struct lustre_handle rs_locks[RS_MAX_LOCKS];
469 /** Lock modes of locks in \a rs_locks */
470 enum ldlm_mode rs_modes[RS_MAX_LOCKS];
473 struct ptlrpc_thread;
477 RQ_PHASE_NEW = 0xebc0de00,
478 RQ_PHASE_RPC = 0xebc0de01,
479 RQ_PHASE_BULK = 0xebc0de02,
480 RQ_PHASE_INTERPRET = 0xebc0de03,
481 RQ_PHASE_COMPLETE = 0xebc0de04,
482 RQ_PHASE_UNREGISTERING = 0xebc0de05,
483 RQ_PHASE_UNDEFINED = 0xebc0de06
486 /** Type of request interpreter call-back */
487 typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
488 struct ptlrpc_request *req,
492 * Definition of request pool structure.
493 * The pool is used to store empty preallocated requests for the case
494 * when we would actually need to send something without performing
495 * any allocations (to avoid e.g. OOM).
497 struct ptlrpc_request_pool {
498 /** Locks the list */
500 /** list of ptlrpc_request structs */
501 struct list_head prp_req_list;
502 /** Maximum message size that would fit into a request from this pool */
504 /** Function to allocate more requests for this pool */
505 int (*prp_populate)(struct ptlrpc_request_pool *, int);
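/*
 * Sketch of creating a pool; ptlrpc_init_rq_pool() and
 * ptlrpc_add_rqs_to_pool() are assumed to be provided by the PtlRPC client
 * code (they are not declared in this excerpt) and the sizes are arbitrary:
 *
 *	struct ptlrpc_request_pool *pool;
 *
 *	pool = ptlrpc_init_rq_pool(4, OST_MAXREQSIZE, ptlrpc_add_rqs_to_pool);
 *	if (!pool)
 *		return -ENOMEM;
 */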
514 * \defgroup nrs Network Request Scheduler
517 struct ptlrpc_nrs_policy;
518 struct ptlrpc_nrs_resource;
519 struct ptlrpc_nrs_request;
522 * NRS control operations.
524 * These are common for all policies.
526 enum ptlrpc_nrs_ctl {
528 * Not a valid opcode.
530 PTLRPC_NRS_CTL_INVALID,
532 * Activate the policy.
534 PTLRPC_NRS_CTL_START,
536 * Reserved for multiple primary policies, which may be a possibility in the future.
541 * Policies can start using opcodes from this value and onwards for
542 * their own purposes; the assigned value itself is arbitrary.
544 PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
548 * ORR policy operations
551 NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
552 NRS_CTL_ORR_WR_QUANTUM,
553 NRS_CTL_ORR_RD_OFF_TYPE,
554 NRS_CTL_ORR_WR_OFF_TYPE,
555 NRS_CTL_ORR_RD_SUPP_REQ,
556 NRS_CTL_ORR_WR_SUPP_REQ,
560 * NRS policy operations.
562 * These determine the behaviour of a policy, and are called in response to NRS core events.
565 struct ptlrpc_nrs_pol_ops {
567 * Called during policy registration; this operation is optional.
569 * \param[in,out] policy The policy being initialized
571 int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
573 * Called during policy unregistration; this operation is optional.
575 * \param[in,out] policy The policy being unregistered/finalized
577 void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
579 * Called when activating a policy via lprocfs; policies allocate and
580 * initialize their resources here; this operation is optional.
582 * \param[in,out] policy The policy being started
584 * \see nrs_policy_start_locked()
586 int (*op_policy_start) (struct ptlrpc_nrs_policy *policy);
588 * Called when deactivating a policy via lprocfs; policies deallocate
589 * their resources here; this operation is optional
591 * \param[in,out] policy The policy being stopped
593 * \see nrs_policy_stop0()
595 void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
597 * Used for policy-specific operations; i.e. not generic ones like
598 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
599 * to an ioctl; this operation is optional.
601 * \param[in,out] policy The policy carrying out operation \a opc
602 * \param[in] opc The command operation being carried out
603 * \param[in,out] arg A generic buffer for communication between the
604 * user and the control operation
609 * \see ptlrpc_nrs_policy_control()
611 int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
612 enum ptlrpc_nrs_ctl opc, void *arg);
615 * Called when obtaining references to the resources of the resource
616 * hierarchy for a request that has arrived for handling at the PTLRPC
617 * service. Policies should return -ve for requests they do not wish
618 * to handle. This operation is mandatory.
620 * \param[in,out] policy The policy we're getting resources for.
621 * \param[in,out] nrq The request we are getting resources for.
622 * \param[in] parent The parent resource of the resource being
623 * requested; set to NULL if none.
624 * \param[out] resp The resource is to be returned here; the
625 * fallback policy in an NRS head should
626 * \e always return a non-NULL pointer value.
627 * \param[in] moving_req When set, signifies that this is an attempt
628 * to obtain resources for a request being moved
629 * to the high-priority NRS head by
630 * ldlm_lock_reorder_req().
631 * This implies two things:
632 * 1. We are under obd_export::exp_rpc_lock and
633 * so should not sleep.
634 * 2. We should not perform non-idempotent operations,
635 * and may skip idempotent operations that were
636 * already carried out when resources were first
637 * taken for the request when it was initialized
638 * in ptlrpc_nrs_req_initialize().
640 * \retval 0, +ve The level of the returned resource in the resource
641 * hierarchy; currently only 0 (for a non-leaf resource)
642 * and 1 (for a leaf resource) are supported by the
646 * \see ptlrpc_nrs_req_initialize()
647 * \see ptlrpc_nrs_hpreq_add_nolock()
649 int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
650 struct ptlrpc_nrs_request *nrq,
651 const struct ptlrpc_nrs_resource *parent,
652 struct ptlrpc_nrs_resource **resp,
655 * Called when releasing references taken for resources in the resource
656 * hierarchy for the request; this operation is optional.
658 * \param[in,out] policy The policy the resource belongs to
659 * \param[in] res The resource to be freed
661 * \see ptlrpc_nrs_req_finalize()
662 * \see ptlrpc_nrs_hpreq_add_nolock()
664 void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
665 const struct ptlrpc_nrs_resource *res);
668 * Obtains a request for handling from the policy, and optionally
669 * removes the request from the policy; this operation is mandatory.
671 * \param[in,out] policy The policy to poll
672 * \param[in] peek When set, signifies that we just want to
673 * examine the request, and not handle it, so the
674 * request is not removed from the policy.
675 * \param[in] force When set, it will force a policy to return a
676 * request if it has one queued.
678 * \retval NULL No request available for handling
679 * \retval valid-pointer The request polled for handling
681 * \see ptlrpc_nrs_req_get_nolock()
683 struct ptlrpc_nrs_request *
684 (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
687 * Called when attempting to add a request to a policy for later
688 * handling; this operation is mandatory.
690 * \param[in,out] policy The policy on which to enqueue \a nrq
691 * \param[in,out] nrq The request to enqueue
696 * \see ptlrpc_nrs_req_add_nolock()
698 int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
699 struct ptlrpc_nrs_request *nrq);
701 * Removes a request from the policy's set of pending requests. Normally
702 * called after a request has been polled successfully from the policy
703 * for handling; this operation is mandatory.
705 * \param[in,out] policy The policy the request \a nrq belongs to
706 * \param[in,out] nrq The request to dequeue
708 void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
709 struct ptlrpc_nrs_request *nrq);
711 * Called after the request has been carried out. Could be used for
712 * job/resource control; this operation is optional.
714 * \param[in,out] policy The policy which is stopping to handle request
716 * \param[in,out] nrq The request
718 * \pre assert_spin_locked(&svcpt->scp_req_lock)
720 * \see ptlrpc_nrs_req_stop_nolock()
722 void (*op_req_stop) (struct ptlrpc_nrs_policy *policy,
723 struct ptlrpc_nrs_request *nrq);
725 * Registers the policy's lprocfs interface with a PTLRPC service.
727 * \param[in] svc The service
732 int (*op_lprocfs_init) (struct ptlrpc_service *svc);
734 * Unregisters the policy's lprocfs interface from a PTLRPC service.
736 * In cases of failed policy registration in
737 * \e ptlrpc_nrs_policy_register(), this function may be called for a
738 * service which has not registered the policy successfully, so
739 * implementations of this method should make sure their operations are
740 * safe in such cases.
742 * \param[in] svc The service
744 void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
750 enum nrs_policy_flags {
752 * Fallback policy, use this flag only on a single supported policy per
753 * service. The flag cannot be used on policies that use
754 * \e PTLRPC_NRS_FL_REG_EXTERN
756 PTLRPC_NRS_FL_FALLBACK = (1 << 0),
758 * Start policy immediately after registering.
760 PTLRPC_NRS_FL_REG_START = (1 << 1),
762 * This is a policy registering from a module different from the one the NRS
763 * core ships in (currently ptlrpc).
765 PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
771 * Denotes whether an NRS instance is for handling normal or high-priority
772 * RPCs, or whether an operation pertains to one or both of the NRS instances
775 enum ptlrpc_nrs_queue_type {
776 PTLRPC_NRS_QUEUE_REG = (1 << 0),
777 PTLRPC_NRS_QUEUE_HP = (1 << 1),
778 PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
784 * A PTLRPC service has at least one NRS head instance for handling normal
785 * priority RPCs, and may optionally have a second NRS head instance for
786 * handling high-priority RPCs. Each NRS head maintains a list of available
787 * policies, of which one and only one policy is acting as the fallback policy,
788 * and optionally a different policy may be acting as the primary policy. For
789 * all RPCs handled by this NRS head instance, NRS core will first attempt to
790 * enqueue the RPC using the primary policy (if any). The fallback policy is
791 * used in the following cases:
792 * - when there was no primary policy in the
793 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request was initialized;
795 * - when the primary policy that was in the
796 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
797 * RPC was initialized denoted that it did not wish, or for some other reason
798 * was not able, to handle the request, by returning a non-valid NRS resource reference;
800 * - when the primary policy that was in the
801 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
802 * RPC was initialized fails later during the request enqueueing stage.
804 * \see nrs_resource_get_safe()
805 * \see nrs_request_enqueue()
809 /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
811 * List of registered policies
813 struct list_head nrs_policy_list;
815 * List of policies with queued requests. Policies that have any
816 * outstanding requests are queued here, and this list is queried
817 * in a round-robin manner from NRS core when obtaining a request
818 * for handling. This ensures that requests from policies that at some
819 * point transition away from the
820 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
822 struct list_head nrs_policy_queued;
824 * Service partition for this NRS head
826 struct ptlrpc_service_part *nrs_svcpt;
828 * Primary policy, which is the preferred policy for handling RPCs
830 struct ptlrpc_nrs_policy *nrs_policy_primary;
832 * Fallback policy, which is the backup policy for handling RPCs
834 struct ptlrpc_nrs_policy *nrs_policy_fallback;
836 * This NRS head handles either HP or regular requests
838 enum ptlrpc_nrs_queue_type nrs_queue_type;
840 * # queued requests from all policies in this NRS head
842 unsigned long nrs_req_queued;
844 * # scheduled requests from all policies in this NRS head
846 unsigned long nrs_req_started;
848 * # policies on this NRS
850 unsigned nrs_num_pols;
852 * This NRS head is in the process of starting a policy
854 unsigned nrs_policy_starting:1;
856 * In the process of shutting down the whole NRS head; used during unregistration
859 unsigned nrs_stopping:1;
862 #define NRS_POL_NAME_MAX 16
864 struct ptlrpc_nrs_pol_desc;
867 * Service compatibility predicate; this determines whether a policy is adequate
868 * for handling RPCs of a particular PTLRPC service.
870 * XXX: This should give the same result during policy registration and
871 * unregistration, and for all partitions of a service; so the result should not
872 * depend on temporal service or other properties that may influence the result.
875 typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
876 const struct ptlrpc_nrs_pol_desc *desc);
878 struct ptlrpc_nrs_pol_conf {
880 * Human-readable policy name
882 char nc_name[NRS_POL_NAME_MAX];
884 * NRS operations for this policy
886 const struct ptlrpc_nrs_pol_ops *nc_ops;
888 * Service compatibility predicate
890 nrs_pol_desc_compat_t nc_compat;
892 * Set for policies that support a single ptlrpc service, i.e. ones that
893 * have \a pd_compat set to nrs_policy_compat_one(). The variable value
894 * depicts the name of the single service that such policies are compatible with.
897 const char *nc_compat_svc_name;
899 * Owner module for this policy descriptor; policies registering from a
900 * different module to the one the NRS framework is held within
901 * (currently ptlrpc), should set this field to THIS_MODULE.
903 struct module *nc_owner;
905 * Policy registration flags; a bitmask of \e nrs_policy_flags
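/*
 * Sketch of how an external policy might fill in this structure before
 * registration; my_pol_ops, nrs_policy_compat_all() and
 * ptlrpc_nrs_policy_register() are assumptions here and are not declared in
 * this excerpt:
 *
 *	static struct ptlrpc_nrs_pol_conf my_pol_conf = {
 *		.nc_name	= "mypolicy",
 *		.nc_ops		= &my_pol_ops,
 *		.nc_compat	= nrs_policy_compat_all,
 *		.nc_owner	= THIS_MODULE,
 *		.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
 *	};
 *
 *	rc = ptlrpc_nrs_policy_register(&my_pol_conf);
 */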
911 * NRS policy registering descriptor
913 * Is used to hold a description of a policy that can be passed to NRS core in
914 * order to register the policy with NRS heads in different PTLRPC services.
916 struct ptlrpc_nrs_pol_desc {
918 * Human-readable policy name
920 char pd_name[NRS_POL_NAME_MAX];
922 * Link into nrs_core::nrs_policies
924 struct list_head pd_list;
926 * NRS operations for this policy
928 const struct ptlrpc_nrs_pol_ops *pd_ops;
930 * Service compatibility predicate
932 nrs_pol_desc_compat_t pd_compat;
934 * Set for policies that are compatible with only one PTLRPC service.
936 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
938 const char *pd_compat_svc_name;
940 * Owner module for this policy descriptor.
942 * We need to hold a reference to the module whenever we might make use
943 * of any of the module's contents, i.e.
944 * - If one or more instances of the policy are at a state where they
945 * might be handling a request, i.e.
946 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
947 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
948 * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
949 * is taken on the module when
950 * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
951 * becomes 0, so that we hold only one reference to the module maximum
954 * We do not need to hold a reference to the module, even though we
955 * might use code and data from the module, in the following cases:
956 * - During external policy registration, because this should happen in
957 * the module's init() function, in which case the module is safe from
958 * removal because a reference is being held on the module by the
959 * kernel, and iirc kmod (and I guess module-init-tools also) will
960 * serialize any racing processes properly anyway.
961 * - During external policy unregistration, because this should happen
962 * in a module's exit() function, and any attempts to start a policy
963 * instance would need to take a reference on the module, and this is
964 * not possible once we have reached the point where the exit()
966 * - During service registration and unregistration, as service setup
967 * and cleanup, and policy registration, unregistration and policy
968 * instance starting, are serialized by \e nrs_core::nrs_mutex, so
969 * as long as users adhere to the convention of registering policies
970 * in init() and unregistering them in module exit() functions, there
971 * should not be a race between these operations.
972 * - During any policy-specific lprocfs operations, because a reference
973 * is held by the kernel on a proc entry that has been entered by a
974 * syscall, so as long as proc entries are removed during unregistration time,
975 * then unregistration and lprocfs operations will be properly
978 struct module *pd_owner;
980 * Bitmask of \e nrs_policy_flags
984 * # of references on this descriptor
992 * Policies transition from one state to the other during their lifetime
994 enum ptlrpc_nrs_pol_state {
996 * Not a valid policy state.
998 NRS_POL_STATE_INVALID,
1000 * Policies are in this state either at the start of their life, or
1001 * transition here when the user selects a different policy to act
1002 * as the primary one.
1004 NRS_POL_STATE_STOPPED,
1006 * Policy is in the process of stopping
1008 NRS_POL_STATE_STOPPING,
1010 * Policy is in the process of starting
1012 NRS_POL_STATE_STARTING,
1014 * A policy is in this state in two cases:
1015 * - it is the fallback policy, which is always in this state.
1016 * - it has been activated by the user; i.e. it is the primary policy,
1018 NRS_POL_STATE_STARTED,
1022 * NRS policy information
1024 * Used for obtaining information for the status of a policy via lprocfs
1026 struct ptlrpc_nrs_pol_info {
1030 char pi_name[NRS_POL_NAME_MAX];
1032 * Current policy state
1034 enum ptlrpc_nrs_pol_state pi_state;
1036 * # RPCs enqueued for later dispatching by the policy
1040 * # RPCs started for dispatch by the policy
1042 long pi_req_started;
1044 * Is this a fallback policy?
1046 unsigned pi_fallback:1;
1052 * There is one instance of this for each policy in each NRS head of each
1053 * PTLRPC service partition.
1055 struct ptlrpc_nrs_policy {
1057 * Linkage into the NRS head's list of policies,
1058 * ptlrpc_nrs:nrs_policy_list
1060 struct list_head pol_list;
1062 * Linkage into the NRS head's list of policies with enqueued
1063 * requests ptlrpc_nrs:nrs_policy_queued
1065 struct list_head pol_list_queued;
1067 * Current state of this policy
1069 enum ptlrpc_nrs_pol_state pol_state;
1071 * Bitmask of nrs_policy_flags
1075 * # RPCs enqueued for later dispatching by the policy
1077 long pol_req_queued;
1079 * # RPCs started for dispatch by the policy
1081 long pol_req_started;
1083 * Usage reference count taken on the policy instance
1087 * The NRS head this policy has been created at
1089 struct ptlrpc_nrs *pol_nrs;
1091 * Private policy data; varies by policy type
1095 * Policy descriptor for this policy instance.
1097 struct ptlrpc_nrs_pol_desc *pol_desc;
1103 * Resources are embedded into two types of NRS entities:
1104 * - Inside NRS policies, in the policy's private data in
1105 * ptlrpc_nrs_policy::pol_private
1106 * - In objects that act as prime-level scheduling entities in different NRS
1107 * policies; e.g. on a policy that performs round robin or similar order
1108 * scheduling across client NIDs, there would be one NRS resource per unique
1109 * client NID. On a policy which performs round robin scheduling across
1110 * backend filesystem objects, there would be one resource associated with
1111 * each of the backend filesystem objects partaking in the scheduling
1112 * performed by the policy.
1114 * NRS resources share a parent-child relationship, in which resources embedded
1115 * in policy instances are the parent entities, with all scheduling entities
1116 * a policy schedules across being the children, thus forming a simple resource
1117 * hierarchy. This hierarchy may be extended with one or more levels in the
1118 * future if the ability to have more than one primary policy is added.
1120 * Upon request initialization, references to the then active NRS policies are
1121 * taken and used to later handle the dispatching of the request with one of these policies.
1124 * \see nrs_resource_get_safe()
1125 * \see ptlrpc_nrs_req_add()
1127 struct ptlrpc_nrs_resource {
1129 * This NRS resource's parent; is NULL for resources embedded in NRS
1130 * policy instances; i.e. those are top-level ones.
1132 struct ptlrpc_nrs_resource *res_parent;
1134 * The policy associated with this resource.
1136 struct ptlrpc_nrs_policy *res_policy;
1149 * This policy is a logical wrapper around previous, non-NRS functionality.
1150 * It dispatches RPCs in the same order as they arrive from the network. This
1151 * policy is currently used as the fallback policy, and the only enabled policy
1152 * on all NRS heads of all PTLRPC service partitions.
1157 * Private data structure for the FIFO policy
1159 struct nrs_fifo_head {
1161 * Resource object for policy instance.
1163 struct ptlrpc_nrs_resource fh_res;
1165 * List of queued requests.
1167 struct list_head fh_list;
1169 * For debugging purposes.
1174 struct nrs_fifo_req {
1175 struct list_head fr_list;
1184 * Instances of this object exist embedded within ptlrpc_request; the main
1185 * purpose of this object is to hold references to the request's resources
1186 * for the lifetime of the request, and to hold properties that policies
1187 * use for determining the request's scheduling priority.
1189 struct ptlrpc_nrs_request {
1191 * The request's resource hierarchy.
1193 struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
1195 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
1196 * policy that was used to enqueue the request.
1198 * \see nrs_request_enqueue()
1200 unsigned nr_res_idx;
1201 unsigned nr_initialized:1;
1202 unsigned nr_enqueued:1;
1203 unsigned nr_started:1;
1204 unsigned nr_finalized:1;
1207 * Policy-specific fields, used for determining a request's scheduling
1208 * priority, and other supporting functionality.
1212 * Fields for the FIFO policy
1214 struct nrs_fifo_req fifo;
1217 * Externally-registering policies may want to use this to allocate
1218 * their own request properties.
1226 * Basic request prioritization operations structure.
1227 * The whole idea is centered around locks and RPCs that might affect locks.
1228 * When a lock is contended we try to give priority to RPCs that might lead
1229 * to fastest release of that lock.
1230 * Currently implemented only for OSTs, in a way that gives all
1231 * IO and truncate RPCs coming from a region of a contended lock
1232 * priority over other requests.
1234 struct ptlrpc_hpreq_ops {
1236 * Check if the lock handle of the given lock is the same as
1237 * taken from the request.
1239 int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
1241 * Check if the request is a high priority one.
1243 int (*hpreq_check)(struct ptlrpc_request *);
1245 * Called after the request has been handled.
1247 void (*hpreq_fini)(struct ptlrpc_request *);
1251 * Represents remote procedure call.
1253 * This is a staple structure used by everybody wanting to send a request in Lustre.
1256 struct ptlrpc_request {
1257 /* Request type: one of PTL_RPC_MSG_* */
1259 /** Result of request processing */
1262 * Linkage item through which this request is included into
1263 * sending/delayed lists on client and into rqbd list on server
1265 struct list_head rq_list;
1267 * Server-side list of incoming unserved requests sorted by arrival
1268 * time. Traversed from time to time to notice about-to-expire
1269 * requests and send back "early replies" to clients to let them
1270 * know the server is alive and well, just very busy servicing their requests.
1273 struct list_head rq_timed_list;
1274 /** server-side history, used for debugging purposes. */
1275 struct list_head rq_history_list;
1276 /** server-side per-export list */
1277 struct list_head rq_exp_list;
1278 /** server-side hp handlers */
1279 struct ptlrpc_hpreq_ops *rq_ops;
1281 /** initial thread servicing this request */
1282 struct ptlrpc_thread *rq_svc_thread;
1284 /** history sequence # */
1285 __u64 rq_history_seq;
1289 /** stub for NRS request */
1290 struct ptlrpc_nrs_request rq_nrq;
1292 /** the index of service's srv_at_array into which request is linked */
1294 /** Lock to protect request flags and some other important bits, like
1298 /** client-side flags are serialized by rq_lock */
1299 unsigned int rq_intr:1, rq_replied:1, rq_err:1,
1300 rq_timedout:1, rq_resend:1, rq_restart:1,
1302 * when ->rq_replay is set, request is kept by the client even
1303 * after server commits corresponding transaction. This is
1304 * used for operations that require sequence of multiple
1305 * requests to be replayed. The only example currently is file
1306 * open/close. When last request in such a sequence is
1307 * committed, ->rq_replay is cleared on all requests in the sequence.
1311 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
1312 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
1314 rq_req_unlink:1, rq_reply_unlink:1,
1315 rq_memalloc:1, /* req originated from "kswapd" */
1316 /* server-side flags */
1317 rq_packed_final:1, /* packed final reply */
1318 rq_hp:1, /* high priority RPC */
1319 rq_at_linked:1, /* link into service's srv_at_array */
1320 rq_reply_truncate:1,
1322 /* whether the "rq_set" is a valid one */
1324 rq_generation_set:1,
1325 /* do not resend request on -EINPROGRESS */
1326 rq_no_retry_einprogress:1,
1327 /* allow the req to be sent if the import is in recovery
1331 /* bulk request, sent to server, but uncommitted */
1334 unsigned int rq_nr_resend;
1336 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
1337 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
1338 atomic_t rq_refcount; /* client-side refcount for SENT race,
1339 * server-side refcount for multiple replies
1342 /** Portal to which this request would be sent */
1343 short rq_request_portal; /* XXX FIXME bug 249 */
1344 /** Portal where to wait for reply and where reply would be sent */
1345 short rq_reply_portal; /* XXX FIXME bug 249 */
1349 * !rq_truncate : # reply bytes actually received,
1350 * rq_truncate : required repbuf_len for resend
1352 int rq_nob_received;
1353 /** Request length */
1357 /** Request message - what client sent */
1358 struct lustre_msg *rq_reqmsg;
1359 /** Reply message - server response */
1360 struct lustre_msg *rq_repmsg;
1361 /** Transaction number */
1366 * List item for the replay list. Not-yet-committed requests get linked here.
1368 * Also see \a rq_replay comment above.
1370 struct list_head rq_replay_list;
1373 * security and encryption data
1376 struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
1377 struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
1378 struct list_head rq_ctx_chain; /**< link to waited ctx */
1380 struct sptlrpc_flavor rq_flvr; /**< for client & server */
1381 enum lustre_sec_part rq_sp_from;
1383 /* client/server security flags */
1385 rq_ctx_init:1, /* context initiation */
1386 rq_ctx_fini:1, /* context destroy */
1387 rq_bulk_read:1, /* request bulk read */
1388 rq_bulk_write:1, /* request bulk write */
1389 /* server authentication flags */
1390 rq_auth_gss:1, /* authenticated by gss */
1391 rq_auth_remote:1, /* authed as remote user */
1392 rq_auth_usr_root:1, /* authed as root */
1393 rq_auth_usr_mdt:1, /* authed as mdt */
1394 rq_auth_usr_ost:1, /* authed as ost */
1395 /* security tfm flags */
1398 /* doesn't expect reply FIXME */
1400 rq_pill_init:1; /* pill initialized */
1402 uid_t rq_auth_uid; /* authed uid */
1403 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
1405 /* (server side), pointed directly into req buffer */
1406 struct ptlrpc_user_desc *rq_user_desc;
1408 /* various buffer pointers */
1409 struct lustre_msg *rq_reqbuf; /* req wrapper */
1410 char *rq_repbuf; /* rep buffer */
1411 struct lustre_msg *rq_repdata; /* rep wrapper msg */
1412 struct lustre_msg *rq_clrbuf; /* only in priv mode */
1413 int rq_reqbuf_len; /* req wrapper buf len */
1414 int rq_reqdata_len; /* req wrapper msg len */
1415 int rq_repbuf_len; /* rep buffer len */
1416 int rq_repdata_len; /* rep wrapper msg len */
1417 int rq_clrbuf_len; /* only in priv mode */
1418 int rq_clrdata_len; /* only in priv mode */
1420 /** early replies go to offset 0, regular replies go after that */
1421 unsigned int rq_reply_off;
1425 /** Fields that help to see if request and reply were swabbed or not */
1426 __u32 rq_req_swab_mask;
1427 __u32 rq_rep_swab_mask;
1429 /** What was import generation when this request was sent */
1430 int rq_import_generation;
1431 enum lustre_imp_state rq_send_state;
1433 /** how many early replies (for stats) */
1436 /** client+server request */
1437 lnet_handle_md_t rq_req_md_h;
1438 struct ptlrpc_cb_id rq_req_cbid;
1439 /** optional time limit for send attempts */
1440 long rq_delay_limit;
1441 /** time request was first queued */
1442 unsigned long rq_queued_time;
1444 /* server-side... */
1445 /** request arrival time */
1446 struct timespec64 rq_arrival_time;
1447 /** separated reply state */
1448 struct ptlrpc_reply_state *rq_reply_state;
1449 /** incoming request buffer */
1450 struct ptlrpc_request_buffer_desc *rq_rqbd;
1452 /** client-only incoming reply */
1453 lnet_handle_md_t rq_reply_md_h;
1454 wait_queue_head_t rq_reply_waitq;
1455 struct ptlrpc_cb_id rq_reply_cbid;
1459 /** Peer description (the other side) */
1460 lnet_process_id_t rq_peer;
1461 /** Server-side, export on which request was received */
1462 struct obd_export *rq_export;
1463 /** Client side, import where request is being sent */
1464 struct obd_import *rq_import;
1466 /** Replay callback, called after request is replayed at recovery */
1467 void (*rq_replay_cb)(struct ptlrpc_request *);
1469 * Commit callback, called when request is committed and about to be freed.
1472 void (*rq_commit_cb)(struct ptlrpc_request *);
1473 /** Opaq data for replay and commit callbacks. */
1476 /** For bulk requests on client only: bulk descriptor */
1477 struct ptlrpc_bulk_desc *rq_bulk;
1479 /** client outgoing req */
1481 * when request/reply sent (secs), or time when request should be sent
1484 /** time for request really sent out */
1485 time64_t rq_real_sent;
1487 /** when request must finish. volatile
1488 * so that servers' early reply updates to the deadline aren't
1489 * kept in per-cpu cache
1491 volatile time64_t rq_deadline;
1492 /** when req reply unlink must finish. */
1493 time64_t rq_reply_deadline;
1494 /** when req bulk unlink must finish. */
1495 time64_t rq_bulk_deadline;
1497 * service time estimate (secs)
1498 * If the request is not served by this time, it is marked as timed out.
1502 /** Multi-rpc bits */
1503 /** Per-request waitq introduced by bug 21938 for recovery waiting */
1504 wait_queue_head_t rq_set_waitq;
1505 /** Link item for request set lists */
1506 struct list_head rq_set_chain;
1507 /** Link back to the request set */
1508 struct ptlrpc_request_set *rq_set;
1509 /** Async completion handler, called when reply is received */
1510 ptlrpc_interpterer_t rq_interpret_reply;
1511 /** Async completion context */
1512 union ptlrpc_async_args rq_async_args;
1514 /** Pool if request is from preallocated list */
1515 struct ptlrpc_request_pool *rq_pool;
1517 struct lu_context rq_session;
1518 struct lu_context rq_recov_session;
1520 /** request format description */
1521 struct req_capsule rq_pill;
1525 * Call the completion handler for the rpc, if any; return its status or the
1526 * original rc if there was no handler defined for this request.
1528 static inline int ptlrpc_req_interpret(const struct lu_env *env,
1529 struct ptlrpc_request *req, int rc)
1531 if (req->rq_interpret_reply) {
1532 req->rq_status = req->rq_interpret_reply(env, req,
1533 &req->rq_async_args, rc);
1535 return req->rq_status;
1541 * Can the request be moved from the regular NRS head to the high-priority NRS
1542 * head (of the same PTLRPC service partition), if any?
1544 * For a reliable result, this should be checked under svcpt->scp_req_lock.
1546 static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
1548 struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
1551 * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
1552 * request has been enqueued first, and ptlrpc_nrs_request::nr_started
1553 * to make sure it has not been scheduled yet (analogous to previous
1554 * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list)).
1556 return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
1562 * Returns 1 if request buffer at offset \a index was already swabbed
1564 static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
1566 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
1567 return req->rq_req_swab_mask & (1 << index);
1571 * Returns 1 if request reply buffer at offset \a index was already swabbed
1573 static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
1575 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
1576 return req->rq_rep_swab_mask & (1 << index);
1580 * Returns 1 if request needs to be swabbed into local cpu byteorder
1582 static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
1584 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1588 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
1590 static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
1592 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1596 * Mark request buffer at offset \a index that it was already swabbed
1598 static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
1600 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
1601 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
1602 req->rq_req_swab_mask |= 1 << index;
1606 * Mark request reply buffer at offset \a index that it was already swabbed
1608 static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
1610 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
1611 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
1612 req->rq_rep_swab_mask |= 1 << index;
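/*
 * Typical unpack pattern built on the helpers above (a sketch only; the
 * concrete swabbing routine, here lustre_swab_ost_body(), depends on the
 * message being unpacked and is not declared in this excerpt):
 *
 *	if (ptlrpc_req_need_swab(req) &&
 *	    !lustre_req_swabbed(req, index)) {
 *		lustre_swab_ost_body(body);
 *		lustre_set_req_swabbed(req, index);
 *	}
 */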
1616 * Convert numerical request phase value \a phase into text string description
1618 static inline const char *
1619 ptlrpc_phase2str(enum rq_phase phase)
1628 case RQ_PHASE_INTERPRET:
1630 case RQ_PHASE_COMPLETE:
1632 case RQ_PHASE_UNREGISTERING:
1633 return "Unregistering";
1640 * Convert the numerical request phase of the request \a req into a text string
1643 static inline const char *
1644 ptlrpc_rqphase2str(struct ptlrpc_request *req)
1646 return ptlrpc_phase2str(req->rq_phase);
1650 * Debugging functions and helpers to print request structure into debug log
1653 /* Spare the preprocessor, spoil the bugs. */
1654 #define FLAG(field, str) (field ? str : "")
1656 /** Convert bit flags into a string */
1657 #define DEBUG_REQ_FLAGS(req) \
1658 ptlrpc_rqphase2str(req), \
1659 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
1660 FLAG(req->rq_err, "E"), \
1661 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
1662 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
1663 FLAG(req->rq_no_resend, "N"), \
1664 FLAG(req->rq_waiting, "W"), \
1665 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
1666 FLAG(req->rq_committed, "M")
1668 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
1670 void _debug_req(struct ptlrpc_request *req,
1671 struct libcfs_debug_msg_data *data, const char *fmt, ...)
1675 * Helper that decides if we need to print the request according to the current debug level.
1678 #define debug_req(msgdata, mask, cdls, req, fmt, a...) \
1680 CFS_CHECK_STACK(msgdata, mask, cdls); \
1682 if (((mask) & D_CANTMASK) != 0 || \
1683 ((libcfs_debug & (mask)) != 0 && \
1684 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
1685 _debug_req((req), msgdata, fmt, ##a); \
1689 * This is the debug print function you need to use to print request structure
1690 * content into the Lustre debug log.
1691 * for most callers (level is a constant) this is resolved at compile time
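/*
 * Typical call (a sketch; the message text and the size argument are
 * illustrative only):
 *
 *	DEBUG_REQ(D_ERROR, req, "unexpected reply size %d", size);
 */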
1693 #define DEBUG_REQ(level, req, fmt, args...) \
1695 if ((level) & (D_ERROR | D_WARNING)) { \
1696 static struct cfs_debug_limit_state cdls; \
1697 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1698 debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
1700 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1701 debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
1707 * Structure that defines a single page of a bulk transfer
1709 struct ptlrpc_bulk_page {
1710 /** Linkage to list of pages in a bulk */
1711 struct list_head bp_link;
1713 * Number of bytes in a page to transfer starting from \a bp_pageoffset
1716 /** offset within a page */
1718 /** The page itself */
1719 struct page *bp_page;
1722 #define BULK_GET_SOURCE 0
1723 #define BULK_PUT_SINK 1
1724 #define BULK_GET_SINK 2
1725 #define BULK_PUT_SOURCE 3
1728 * Definition of bulk descriptor.
1729 * Bulks are special "two phase" RPCs where the initial request message
1730 * is sent first and is followed by a transfer (or receiving) of a large
1731 * amount of data to be settled into pages referenced from the bulk descriptors.
1732 * Bulk transfers (the actual data following the small requests) are done
1733 * on separate LNet portals.
1734 * In Lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
1735 * Another user is readpage for MDT.
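/*
 * Client-side sketch of setting up a bulk write for a request; the helpers
 * ptlrpc_prep_bulk_imp() and ptlrpc_prep_bulk_page_pin(), and the
 * OST_BULK_PORTAL constant, are assumed from the wider PtlRPC/Lustre code
 * and are not declared in this excerpt:
 *
 *	struct ptlrpc_bulk_desc *desc;
 *	int i;
 *
 *	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_GET_SOURCE,
 *				    OST_BULK_PORTAL);
 *	if (!desc)
 *		return -ENOMEM;
 *	for (i = 0; i < npages; i++)
 *		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 */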
1737 struct ptlrpc_bulk_desc {
1738 /** completed with failure */
1739 unsigned long bd_failure:1;
1740 /** {put,get}{source,sink} */
1741 unsigned long bd_type:2;
1743 unsigned long bd_registered:1;
1744 /** For serialization with callback */
1746 /** Import generation when request for this bulk was sent */
1747 int bd_import_generation;
1748 /** LNet portal for this bulk */
1750 /** Server side - export this bulk created for */
1751 struct obd_export *bd_export;
1752 /** Client side - import this bulk was sent on */
1753 struct obd_import *bd_import;
1754 /** Back pointer to the request */
1755 struct ptlrpc_request *bd_req;
1756 wait_queue_head_t bd_waitq; /* server side only WQ */
1757 int bd_iov_count; /* # entries in bd_iov */
1758 int bd_max_iov; /* allocated size of bd_iov */
1759 int bd_nob; /* # bytes covered */
1760 int bd_nob_transferred; /* # bytes GOT/PUT */
1764 struct ptlrpc_cb_id bd_cbid; /* network callback info */
1765 lnet_nid_t bd_sender; /* stash event::sender */
1766 int bd_md_count; /* # valid entries in bd_mds */
1767 int bd_md_max_brw; /* max entries in bd_mds */
1768 /** array of associated MDs */
1769 lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
1772 * encrypt iov, size is either 0 or bd_iov_count.
1774 lnet_kiov_t *bd_enc_iov;
1776 lnet_kiov_t bd_iov[0];
1780 SVC_STOPPED = 1 << 0,
1781 SVC_STOPPING = 1 << 1,
1782 SVC_STARTING = 1 << 2,
1783 SVC_RUNNING = 1 << 3,
1785 SVC_SIGNAL = 1 << 5,
1788 #define PTLRPC_THR_NAME_LEN 32
1790 * Definition of server service thread structure
1792 struct ptlrpc_thread {
1794 * List of active threads in svc->srv_threads
1796 struct list_head t_link;
1798 * thread-private data (preallocated memory)
1803 * service thread index, from ptlrpc_start_threads
1807 * service thread pid
1811 * put a per-thread watchdog in the structure, b=14840
1813 * The Lustre watchdog was removed for the client in the hope
1814 * that a generic watchdog can be merged into the kernel.
1815 * When that happens, we should add the field below back:
1817 * struct lc_watchdog *t_watchdog;
1820 * the svc this thread belongs to, b=18582
1822 struct ptlrpc_service_part *t_svcpt;
1823 wait_queue_head_t t_ctl_waitq;
1824 struct lu_env *t_env;
1825 char t_name[PTLRPC_THR_NAME_LEN];
1828 static inline int thread_is_init(struct ptlrpc_thread *thread)
1830 return thread->t_flags == 0;
1833 static inline int thread_is_stopped(struct ptlrpc_thread *thread)
1835 return !!(thread->t_flags & SVC_STOPPED);
1838 static inline int thread_is_stopping(struct ptlrpc_thread *thread)
1840 return !!(thread->t_flags & SVC_STOPPING);
1843 static inline int thread_is_starting(struct ptlrpc_thread *thread)
1845 return !!(thread->t_flags & SVC_STARTING);
1848 static inline int thread_is_running(struct ptlrpc_thread *thread)
1850 return !!(thread->t_flags & SVC_RUNNING);
1853 static inline int thread_is_event(struct ptlrpc_thread *thread)
1855 return !!(thread->t_flags & SVC_EVENT);
1858 static inline int thread_is_signal(struct ptlrpc_thread *thread)
1860 return !!(thread->t_flags & SVC_SIGNAL);
1863 static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
1865 thread->t_flags &= ~flags;
1868 static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
1870 thread->t_flags = flags;
1873 static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
1875 thread->t_flags |= flags;
1878 static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
1881 if (thread->t_flags & flags) {
1882 thread->t_flags &= ~flags;
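/*
 * Sketch of how a service thread's main loop might use these helpers;
 * handle_one_request() is hypothetical, while t_ctl_waitq is the wait queue
 * member of struct ptlrpc_thread above:
 *
 *	thread_add_flags(thread, SVC_RUNNING);
 *	wake_up(&thread->t_ctl_waitq);
 *	while (!thread_is_stopping(thread))
 *		handle_one_request();
 *	thread_clear_flags(thread, SVC_RUNNING);
 *	thread_add_flags(thread, SVC_STOPPED);
 */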
1889 * Request buffer descriptor structure.
1890 * This is a structure that contains one posted request buffer for service.
1891 * Once data lands in a buffer, the event callback creates the actual request
1892 * and wakes one of the service threads to process the new incoming request.
1893 * More than one request can fit into the buffer.
1895 struct ptlrpc_request_buffer_desc {
1896 /** Link item for rqbds on a service */
1897 struct list_head rqbd_list;
1898 /** History of requests for this buffer */
1899 struct list_head rqbd_reqs;
1900 /** Back pointer to service for which this buffer is registered */
1901 struct ptlrpc_service_part *rqbd_svcpt;
1902 /** LNet descriptor */
1903 lnet_handle_md_t rqbd_md_h;
1905 /** The buffer itself */
1907 struct ptlrpc_cb_id rqbd_cbid;
1909 * This "embedded" request structure is only used for the
1910 * last request to fit into the buffer
1912 struct ptlrpc_request rqbd_req;
1915 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
1917 struct ptlrpc_service_ops {
1919 * if non-NULL, called during thread creation (ptlrpc_start_thread())
1920 * to initialize service-specific per-thread state.
1922 int (*so_thr_init)(struct ptlrpc_thread *thr);
1924 * if non-NULL, called during thread shutdown (ptlrpc_main()) to
1925 * destroy the state created by ->so_thr_init().
1927 void (*so_thr_done)(struct ptlrpc_thread *thr);
1929 * Handler function for incoming requests for this service
1931 int (*so_req_handler)(struct ptlrpc_request *req);
1933 * function to determine the priority of the request; it is called
1934 * for every new request
1936 int (*so_hpreq_handler)(struct ptlrpc_request *);
1938 * service-specific print fn
1940 void (*so_req_printer)(void *, struct ptlrpc_request *);
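/*
 * Illustrative sketch (not part of the original header): a minimal
 * operations table for a service that only needs a request handler.
 * "example_req_handler" and "example_svc_ops" are hypothetical; the
 * per-thread init/fini hooks and the HP classifier are simply unset.
 */
static int example_req_handler(struct ptlrpc_request *req)
{
	/* unpack the request, do the work, pack the reply */
	return 0;
}

static struct ptlrpc_service_ops example_svc_ops = {
	.so_req_handler		= example_req_handler,
	.so_hpreq_handler	= NULL,		/* no high-priority classification */
	.so_req_printer		= NULL,		/* no service-specific printer */
};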
1943 #ifndef __cfs_cacheline_aligned
1944 /* NB: put it here to reduce patch dependence */
1945 # define __cfs_cacheline_aligned
1949 * How many high priority requests to serve before serving one normal request.
1952 #define PTLRPC_SVC_HP_RATIO 10
1955 * Definition of PortalRPC service.
1956 * The service listens on a particular portal (like a TCP port)
1957 * and performs actions for a specific server, such as the I/O service for an OST
1958 * or the general metadata service for an MDS.
1960 struct ptlrpc_service {
1961 /** serialize sysfs operations */
1962 spinlock_t srv_lock;
1963 /** most often accessed fields */
1964 /** chain thru all services */
1965 struct list_head srv_list;
1966 /** service operations table */
1967 struct ptlrpc_service_ops srv_ops;
1968 /** only statically allocated strings here; we don't clean them */
1970 /** only statically allocated strings here; we don't clean them */
1971 char *srv_thread_name;
1972 /** service thread list */
1973 struct list_head srv_threads;
1974 /** # of threads to create for each partition during initialization */
1975 int srv_nthrs_cpt_init;
1976 /** limit on the number of threads for each partition */
1977 int srv_nthrs_cpt_limit;
1978 /** Root of debugfs dir tree for this service */
1979 struct dentry *srv_debugfs_entry;
1980 /** Pointer to statistic data for this service */
1981 struct lprocfs_stats *srv_stats;
1982 /** # of high-priority requests to handle per low-priority request */
1983 int srv_hpreq_ratio;
1984 /** biggest request to receive */
1985 int srv_max_req_size;
1986 /** biggest reply to send */
1987 int srv_max_reply_size;
1988 /** size of individual buffers */
1990 /** # buffers to allocate in 1 group */
1991 int srv_nbuf_per_group;
1992 /** Local portal on which to receive requests */
1993 __u32 srv_req_portal;
1994 /** Portal on the client to send replies to */
1995 __u32 srv_rep_portal;
1997 * Tags for lu_context associated with this thread, see struct
2001 /** soft watchdog timeout multiplier */
2002 int srv_watchdog_factor;
2003 /** under unregister_service */
2004 unsigned srv_is_stopping:1;
2006 /** max # request buffers in history per partition */
2007 int srv_hist_nrqbds_cpt_max;
2008 /** number of CPTs this service bound on */
2010 /** CPTs array this service bound on */
2012 /** 2^srv_cptab_bits >= cfs_cpt_number(srv_cptable) */
2014 /** CPT table this service is running over */
2015 struct cfs_cpt_table *srv_cptable;
2018 struct kobject srv_kobj;
2019 struct completion srv_kobj_unregister;
2021 * partition data for ptlrpc service
2023 struct ptlrpc_service_part *srv_parts[0];
2027 * Definition of PortalRPC service partition data.
2028 * Although a service has only one instance right now, we
2029 * will have multiple instances very soon (one instance per CPT).
2031 * it has four locks:
2033 * serialize operations on rqbd and requests waiting for preprocess
2035 * serialize operations on active requests sent to this portal
2037 * serialize adaptive timeout stuff
2039 * serialize operations on RS list (reply states)
2041 * We don't have any use-case to take two or more locks at the same time
2042 * for now, so there is no lock order issue.
2044 struct ptlrpc_service_part {
2045 /** back reference to owner */
2046 struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
2047 /* CPT id, reserved */
2049 /** always increasing number */
2051 /** # of starting threads */
2052 int scp_nthrs_starting;
2053 /** # of stopping threads, reserved for shrinking threads */
2054 int scp_nthrs_stopping;
2055 /** # running threads */
2056 int scp_nthrs_running;
2057 /** service threads list */
2058 struct list_head scp_threads;
2061 * serialize the following fields, used for protecting the
2062 * rqbd list and incoming requests waiting for preprocess;
2063 * thread starting & stopping are also protected by this lock.
2065 spinlock_t scp_lock __cfs_cacheline_aligned;
2066 /** total # req buffer descs allocated */
2067 int scp_nrqbds_total;
2068 /** # posted request buffers for receiving */
2069 int scp_nrqbds_posted;
2070 /** rqbd allocation is in progress */
2071 int scp_rqbd_allocating;
2072 /** # incoming reqs */
2073 int scp_nreqs_incoming;
2074 /** request buffers to be reposted */
2075 struct list_head scp_rqbd_idle;
2076 /** req buffers receiving */
2077 struct list_head scp_rqbd_posted;
2078 /** incoming reqs */
2079 struct list_head scp_req_incoming;
2080 /** timeout before re-posting reqs, in ticks */
2081 long scp_rqbd_timeout;
2083 * all threads sleep on this. This wait-queue is signalled when a new
2084 * incoming request arrives and when a difficult reply has to be handled.
2086 wait_queue_head_t scp_waitq;
2088 /** request history */
2089 struct list_head scp_hist_reqs;
2090 /** request buffer history */
2091 struct list_head scp_hist_rqbds;
2092 /** # request buffers in history */
2093 int scp_hist_nrqbds;
2094 /** sequence number for request */
2096 /** highest seq culled from history */
2097 __u64 scp_hist_seq_culled;
2100 * serialize the following fields, used for processing requests
2101 * sent to this portal
2103 spinlock_t scp_req_lock __cfs_cacheline_aligned;
2104 /** # reqs in either of the NRS heads below */
2105 /** # reqs being served */
2106 int scp_nreqs_active;
2107 /** # HPreqs being served */
2108 int scp_nhreqs_active;
2109 /** # hp requests handled */
2112 /** NRS head for regular requests */
2113 struct ptlrpc_nrs scp_nrs_reg;
2114 /** NRS head for HP requests; this is only valid for services that can
2115 * handle HP requests
2117 struct ptlrpc_nrs *scp_nrs_hp;
2122 * serialize the following fields, used for changes on
2125 spinlock_t scp_at_lock __cfs_cacheline_aligned;
2126 /** estimated rpc service time */
2127 struct adaptive_timeout scp_at_estimate;
2128 /** reqs waiting for replies */
2129 struct ptlrpc_at_array scp_at_array;
2130 /** early reply timer */
2131 struct timer_list scp_at_timer;
2133 unsigned long scp_at_checktime;
2134 /** check early replies */
2135 unsigned scp_at_check;
2139 * serialize the following fields, used for processing
2140 * replies for this portal
2142 spinlock_t scp_rep_lock __cfs_cacheline_aligned;
2143 /** all the active replies */
2144 struct list_head scp_rep_active;
2145 /** List of free reply_states */
2146 struct list_head scp_rep_idle;
2147 /** waitq to signal when adding reply states to scp_rep_idle */
2148 wait_queue_head_t scp_rep_waitq;
2149 /** # 'difficult' replies */
2150 atomic_t scp_nreps_difficult;
2153 #define ptlrpc_service_for_each_part(part, i, svc) \
2155 i < (svc)->srv_ncpts && \
2156 (svc)->srv_parts && \
2157 ((part) = (svc)->srv_parts[i]); i++)
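/*
 * Illustrative sketch (not part of the original header): walking every
 * partition of a service with the macro above, for example to sum the
 * number of running threads.  "example_count_running_threads" is a
 * hypothetical helper.
 */
static inline int example_count_running_threads(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	int total = 0;
	int i;

	ptlrpc_service_for_each_part(svcpt, i, svc)
		total += svcpt->scp_nthrs_running;

	return total;
}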
2160 * Declaration of ptlrpcd control structure
2162 struct ptlrpcd_ctl {
2164 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
2166 unsigned long pc_flags;
2168 * Thread lock protecting structure fields.
2174 struct completion pc_starting;
2178 struct completion pc_finishing;
2180 * Thread requests set.
2182 struct ptlrpc_request_set *pc_set;
2184 * Thread name used in kthread_run()
2188 * Environment for request interpreters to run in.
2190 struct lu_env pc_env;
2192 * CPT the thread is bound on.
2196 * Index of ptlrpcd thread in the array.
2200 * Pointer to the array of partners' ptlrpcd_ctl structure.
2202 struct ptlrpcd_ctl **pc_partners;
2204 * Number of the ptlrpcd's partners.
2208 * Record the partner index to be processed next.
2212 * Error code if the thread failed to fully start.
2217 /* Bits for pc_flags */
2218 enum ptlrpcd_ctl_flags {
2220 * Ptlrpc thread start flag.
2222 LIOD_START = 1 << 0,
2224 * Ptlrpc thread stop flag.
2228 * Ptlrpc thread force flag (only force stop so far).
2229 * This will cause any in-flight RPCs handled by the thread
2230 * to be aborted if LIOD_STOP is also specified.
2232 LIOD_FORCE = 1 << 2,
2234 * This is a recovery ptlrpc thread.
2236 LIOD_RECOVERY = 1 << 3,
2243 * Service compatibility function; the policy is compatible with all services.
2245 * \param[in] svc The service the policy is attempting to register with.
2246 * \param[in] desc The policy descriptor
2248 * \retval true The policy is compatible with the service
2250 * \see ptlrpc_nrs_pol_desc::pd_compat()
2252 static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
2253 const struct ptlrpc_nrs_pol_desc *desc)
2259 * Service compatibility function; the policy is compatible with only a specific
2260 * service which is identified by its human-readable name at
2261 * ptlrpc_service::srv_name.
2263 * \param[in] svc The service the policy is attempting to register with.
2264 * \param[in] desc The policy descriptor
2266 * \retval false The policy is not compatible with the service
2267 * \retval true The policy is compatible with the service
2269 * \see ptlrpc_nrs_pol_desc::pd_compat()
2271 static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
2272 const struct ptlrpc_nrs_pol_desc *desc)
2274 return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
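/*
 * Illustrative sketch (not part of the original header): a policy
 * descriptor would point its pd_compat callback at one of the helpers
 * above.  The initializer below is an assumption about which fields of
 * struct ptlrpc_nrs_pol_desc are relevant, and "ost_io" is only an
 * example service name; kept under #if 0 because it is a sketch.
 */
#if 0
static struct ptlrpc_nrs_pol_desc example_pol_desc = {
	.pd_compat		= nrs_policy_compat_one,
	.pd_compat_svc_name	= "ost_io",
};
#endif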
2279 /* ptlrpc/events.c */
2280 extern lnet_handle_eq_t ptlrpc_eq_h;
2281 int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
2282 lnet_process_id_t *peer, lnet_nid_t *self);
2284 * These callbacks are invoked by LNet when something happened to
2288 void request_out_callback(lnet_event_t *ev);
2289 void reply_in_callback(lnet_event_t *ev);
2290 void client_bulk_callback(lnet_event_t *ev);
2291 void request_in_callback(lnet_event_t *ev);
2292 void reply_out_callback(lnet_event_t *ev);
2295 /* ptlrpc/connection.c */
2296 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
2298 struct obd_uuid *uuid);
2299 int ptlrpc_connection_put(struct ptlrpc_connection *c);
2300 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
2301 int ptlrpc_connection_init(void);
2302 void ptlrpc_connection_fini(void);
2304 /* ptlrpc/niobuf.c */
2306 * Actual interfacing with LNet to put/get/register/unregister stuff
2310 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
2312 static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
2314 struct ptlrpc_bulk_desc *desc;
2317 desc = req->rq_bulk;
2319 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
2320 req->rq_bulk_deadline > ktime_get_real_seconds())
2326 spin_lock(&desc->bd_lock);
2327 rc = desc->bd_md_count;
2328 spin_unlock(&desc->bd_lock);
2332 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
2333 #define PTLRPC_REPLY_EARLY 0x02
2334 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
2335 int ptlrpc_reply(struct ptlrpc_request *req);
2336 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
2337 int ptlrpc_error(struct ptlrpc_request *req);
2338 void ptlrpc_resend_req(struct ptlrpc_request *request);
2339 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
2340 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
2341 int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
2344 /* ptlrpc/client.c */
2346 * Client-side portals API. Everything to send requests, receive replies,
2347 * request queues, request management, etc.
2350 void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
2352 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
2353 struct ptlrpc_client *);
2354 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
2356 int ptlrpc_queue_wait(struct ptlrpc_request *req);
2357 int ptlrpc_replay_req(struct ptlrpc_request *req);
2358 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
2359 void ptlrpc_abort_inflight(struct obd_import *imp);
2360 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
2362 struct ptlrpc_request_set *ptlrpc_prep_set(void);
2363 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
2365 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
2366 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
2367 int ptlrpc_set_wait(struct ptlrpc_request_set *);
2368 int ptlrpc_expired_set(void *data);
2369 void ptlrpc_interrupted_set(void *data);
2370 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
2371 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
2372 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
2373 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
2374 struct ptlrpc_request *req);
2376 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
2377 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
2379 struct ptlrpc_request_pool *
2380 ptlrpc_init_rq_pool(int, int,
2381 int (*populate_pool)(struct ptlrpc_request_pool *, int));
2383 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
2384 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
2385 const struct req_format *format);
2386 struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
2387 struct ptlrpc_request_pool *,
2388 const struct req_format *);
2389 void ptlrpc_request_free(struct ptlrpc_request *request);
2390 int ptlrpc_request_pack(struct ptlrpc_request *request,
2391 __u32 version, int opcode);
2392 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *,
2393 const struct req_format *,
2395 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
2396 __u32 version, int opcode, char **bufs,
2397 struct ptlrpc_cli_ctx *ctx);
2398 void ptlrpc_req_finished(struct ptlrpc_request *request);
2399 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
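/*
 * Illustrative sketch (not part of the original header): the usual life
 * cycle of a synchronous client RPC built from the declarations above.
 * "imp", "format", "version" and "opcode" are assumed caller-supplied
 * inputs, error handling is kept minimal, and ptlrpc_request_set_replen()
 * is declared later in this header.
 */
static inline int example_sync_rpc(struct obd_import *imp,
				   const struct req_format *format,
				   __u32 version, int opcode)
{
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc(imp, format);	/* allocate + apply format */
	if (!req)
		return -ENOMEM;

	rc = ptlrpc_request_pack(req, version, opcode);	/* pack request buffers */
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	ptlrpc_request_set_replen(req);		/* size the expected reply */
	rc = ptlrpc_queue_wait(req);		/* send and wait for the reply */
	ptlrpc_req_finished(req);		/* drop our reference */
	return rc;
}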
2400 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
2401 unsigned npages, unsigned max_brw,
2402 unsigned type, unsigned portal);
2403 void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
2404 static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
2406 __ptlrpc_free_bulk(bulk, 1);
2409 static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
2411 __ptlrpc_free_bulk(bulk, 0);
2414 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
2415 struct page *page, int pageoffset, int len, int);
2416 static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
2417 struct page *page, int pageoffset,
2420 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
2423 static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
2424 struct page *page, int pageoffset,
2427 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
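/*
 * Illustrative sketch (not part of the original header): attaching a
 * caller-provided page array to a client-side bulk descriptor.  The
 * "type" and "portal" values are assumed to be supplied by the caller,
 * and the _pin variant is assumed to take an extra page reference.
 */
static inline int example_prep_bulk(struct ptlrpc_request *req,
				    struct page **pages, unsigned int count,
				    unsigned int type, unsigned int portal)
{
	struct ptlrpc_bulk_desc *desc;
	unsigned int i;

	desc = ptlrpc_prep_bulk_imp(req, count, 1, type, portal);
	if (!desc)
		return -ENOMEM;

	/* one full page per iov entry */
	for (i = 0; i < count; i++)
		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);

	return 0;
}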
2430 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2431 struct obd_import *imp);
2432 __u64 ptlrpc_next_xid(void);
2433 __u64 ptlrpc_sample_next_xid(void);
2434 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
2436 /* Set of routines to run a function in ptlrpcd context */
2437 void *ptlrpcd_alloc_work(struct obd_import *imp,
2438 int (*cb)(const struct lu_env *, void *), void *data);
2439 void ptlrpcd_destroy_work(void *handler);
2440 int ptlrpcd_queue_work(void *handler);
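/*
 * Illustrative sketch (not part of the original header): deferring a
 * callback to ptlrpcd context with the three routines above.  The
 * ERR_PTR-encoded failure return from ptlrpcd_alloc_work() and the
 * point at which the handler is destroyed are assumptions.
 */
static inline int example_run_in_ptlrpcd(struct obd_import *imp, void *data,
					 int (*cb)(const struct lu_env *, void *))
{
	void *handler;

	handler = ptlrpcd_alloc_work(imp, cb, data);
	if (IS_ERR(handler))
		return PTR_ERR(handler);

	ptlrpcd_queue_work(handler);	/* run cb asynchronously in ptlrpcd */
	ptlrpcd_destroy_work(handler);	/* release the handler when done with it */
	return 0;
}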
2443 struct ptlrpc_service_buf_conf {
2444 /* nbufs is the number of buffers to allocate when growing the pool */
2445 unsigned int bc_nbufs;
2446 /* buffer size to post */
2447 unsigned int bc_buf_size;
2448 /* portal to listen for requests on */
2449 unsigned int bc_req_portal;
2450 /* portal to send replies to */
2451 unsigned int bc_rep_portal;
2452 /* maximum request size to be accepted for this service */
2453 unsigned int bc_req_max_size;
2454 /* maximum reply size this service can ever send */
2455 unsigned int bc_rep_max_size;
2458 struct ptlrpc_service_thr_conf {
2459 /* threadname should be 8 characters or less - 6 will be added on */
2461 /* thread count increase factor for each CPU */
2462 unsigned int tc_thr_factor;
2463 /* # of service threads to start on each partition during initialization */
2464 unsigned int tc_nthrs_init;
2466 * low-water mark of the per-partition thread upper limit while running;
2467 * service availability may be impacted if the number of threads is lower
2468 * than this value. It can be ZERO if the service doesn't require
2469 * CPU affinity or there is only one partition.
2471 unsigned int tc_nthrs_base;
2472 /* "soft" limit for total threads number */
2473 unsigned int tc_nthrs_max;
2474 /* user-specified number of threads; it will be validated against the
2475 * other members of this structure.
2477 unsigned int tc_nthrs_user;
2478 /* set NUMA node affinity for service threads */
2479 unsigned int tc_cpu_affinity;
2480 /* Tags for lu_context associated with service thread */
2484 struct ptlrpc_service_cpt_conf {
2485 struct cfs_cpt_table *cc_cptable;
2486 /* string pattern to describe CPTs for a service */
2490 struct ptlrpc_service_conf {
2493 /* soft watchdog timeout multiplier to print stuck service traces */
2494 unsigned int psc_watchdog_factor;
2495 /* buffer information */
2496 struct ptlrpc_service_buf_conf psc_buf;
2497 /* thread information */
2498 struct ptlrpc_service_thr_conf psc_thr;
2499 /* CPU partition information */
2500 struct ptlrpc_service_cpt_conf psc_cpt;
2501 /* function table */
2502 struct ptlrpc_service_ops psc_ops;
2505 /* ptlrpc/service.c */
2507 * Server-side services API. Register/unregister service, request state
2508 * management, service thread management
2512 void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
2513 void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
2514 struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf,
2515 struct kset *parent,
2516 struct dentry *debugfs_entry);
2518 int ptlrpc_start_threads(struct ptlrpc_service *svc);
2519 int ptlrpc_unregister_service(struct ptlrpc_service *service);
2521 int ptlrpc_hr_init(void);
2522 void ptlrpc_hr_fini(void);
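/*
 * Illustrative sketch (not part of the original header): filling a
 * ptlrpc_service_conf and registering it.  Every numeric value below
 * (portals, buffer sizes, thread counts) is a made-up assumption, as is
 * "example_svc_ops" being supplied by the caller; real services derive
 * these from their protocol constants.
 */
static inline struct ptlrpc_service *
example_start_service(struct ptlrpc_service_ops *ops, struct kset *parent,
		      struct dentry *debugfs_entry)
{
	struct ptlrpc_service_conf conf = {
		.psc_watchdog_factor	= 2,
		.psc_buf = {
			.bc_nbufs		= 64,
			.bc_buf_size		= 4096,
			.bc_req_max_size	= 4096,
			.bc_rep_max_size	= 4096,
			.bc_req_portal		= 10,	/* assumed portal number */
			.bc_rep_portal		= 11,	/* assumed portal number */
		},
		.psc_thr = {
			.tc_nthrs_init		= 2,
			.tc_nthrs_max		= 8,
		},
		.psc_ops = *ops,
	};

	return ptlrpc_register_service(&conf, parent, debugfs_entry);
}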
2526 /* ptlrpc/import.c */
2531 int ptlrpc_connect_import(struct obd_import *imp);
2532 int ptlrpc_init_import(struct obd_import *imp);
2533 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
2534 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
2536 /* ptlrpc/pack_generic.c */
2537 int ptlrpc_reconnect_import(struct obd_import *imp);
2541 * ptlrpc msg buffer and swab interface
2545 int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
2547 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
2549 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
2550 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
2552 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
2554 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
2555 __u32 *lens, char **bufs);
2556 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
2558 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
2559 __u32 *lens, char **bufs, int flags);
2560 #define LPRFL_EARLY_REPLY 1
2561 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
2562 char **bufs, int flags);
2563 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
2564 unsigned int newlen, int move_data);
2565 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
2566 int __lustre_unpack_msg(struct lustre_msg *m, int len);
2567 int lustre_msg_hdr_size(__u32 magic, int count);
2568 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
2569 int lustre_msg_size_v2(int count, __u32 *lengths);
2570 int lustre_packed_msg_size(struct lustre_msg *msg);
2571 int lustre_msg_early_size(void);
2572 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
2573 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
2574 int lustre_msg_buflen(struct lustre_msg *m, int n);
2575 int lustre_msg_bufcount(struct lustre_msg *m);
2576 char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
2577 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
2578 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
2579 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
2580 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
2581 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
2582 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
2583 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
2584 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
2585 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
2586 __u32 lustre_msg_get_type(struct lustre_msg *msg);
2587 void lustre_msg_add_version(struct lustre_msg *msg, int version);
2588 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
2589 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
2590 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
2591 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
2592 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
2593 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
2594 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
2595 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
2596 int lustre_msg_get_status(struct lustre_msg *msg);
2597 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
2598 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
2599 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
2600 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
2601 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
2602 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
2603 void lustre_msg_set_handle(struct lustre_msg *msg,
2604 struct lustre_handle *handle);
2605 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
2606 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
2607 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
2608 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
2609 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
2610 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
2611 void ptlrpc_request_set_replen(struct ptlrpc_request *req);
2612 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
2613 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
2614 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
2615 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
2618 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
2619 unsigned int newlen, int move_data)
2621 LASSERT(req->rq_reply_state);
2622 LASSERT(req->rq_repmsg);
2623 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
2627 #ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
2629 static inline int ptlrpc_status_hton(int h)
2632 * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
2633 * ELDLM_LOCK_ABORTED, etc.
2636 return -lustre_errno_hton(-h);
2641 static inline int ptlrpc_status_ntoh(int n)
2644 * See the comment in ptlrpc_status_hton().
2647 return -lustre_errno_ntoh(-n);
2654 #define ptlrpc_status_hton(h) (h)
2655 #define ptlrpc_status_ntoh(n) (n)
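/*
 * Illustrative sketch (not part of the original header): a server-side
 * handler would store its host-endian return code in the reply after
 * translating it with ptlrpc_status_hton(); the client applies
 * ptlrpc_status_ntoh() on the way back.  "rc" is an assumed errno value.
 */
static inline void example_set_reply_status(struct ptlrpc_request *req, int rc)
{
	lustre_msg_set_status(req->rq_repmsg, ptlrpc_status_hton(rc));
}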
2660 /** Change request phase of \a req to \a new_phase */
2662 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
2664 if (req->rq_phase == new_phase)
2667 if (new_phase == RQ_PHASE_UNREGISTERING) {
2668 req->rq_next_phase = req->rq_phase;
2670 atomic_inc(&req->rq_import->imp_unregistering);
2673 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
2675 atomic_dec(&req->rq_import->imp_unregistering);
2678 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
2679 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
2681 req->rq_phase = new_phase;
2685 * Returns true if request \a req got an early reply and the hard deadline is not yet met
2688 ptlrpc_client_early(struct ptlrpc_request *req)
2690 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2691 req->rq_reply_deadline > ktime_get_real_seconds())
2693 return req->rq_early;
2697 * Returns true if we got a real reply from the server for this request
2700 ptlrpc_client_replied(struct ptlrpc_request *req)
2702 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2703 req->rq_reply_deadline > ktime_get_real_seconds())
2705 return req->rq_replied;
2708 /** Returns true if request \a req is in the process of receiving the server reply */
2710 ptlrpc_client_recv(struct ptlrpc_request *req)
2712 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2713 req->rq_reply_deadline > ktime_get_real_seconds())
2715 return req->rq_receiving_reply;
2719 ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
2723 spin_lock(&req->rq_lock);
2724 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2725 req->rq_reply_deadline > ktime_get_real_seconds()) {
2726 spin_unlock(&req->rq_lock);
2729 rc = req->rq_receiving_reply;
2730 rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
2731 spin_unlock(&req->rq_lock);
2736 ptlrpc_client_wake_req(struct ptlrpc_request *req)
2739 wake_up(&req->rq_reply_waitq);
2741 wake_up(&req->rq_set->set_waitq);
2745 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
2747 LASSERT(atomic_read(&rs->rs_refcount) > 0);
2748 atomic_inc(&rs->rs_refcount);
2752 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
2754 LASSERT(atomic_read(&rs->rs_refcount) > 0);
2755 if (atomic_dec_and_test(&rs->rs_refcount))
2756 lustre_free_reply_state(rs);
2759 /* Should only be called once per req */
2760 static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
2762 if (!req->rq_reply_state)
2763 return; /* shouldn't occur */
2764 ptlrpc_rs_decref(req->rq_reply_state);
2765 req->rq_reply_state = NULL;
2766 req->rq_repmsg = NULL;
2769 static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
2771 return lustre_msg_get_magic(req->rq_reqmsg);
2774 static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
2776 switch (req->rq_reqmsg->lm_magic) {
2777 case LUSTRE_MSG_MAGIC_V2:
2778 return req->rq_reqmsg->lm_repsize;
2780 LASSERTF(0, "incorrect message magic: %08x\n",
2781 req->rq_reqmsg->lm_magic);
2786 static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
2788 if (req->rq_delay_limit != 0 &&
2789 time_before(cfs_time_add(req->rq_queued_time,
2790 cfs_time_seconds(req->rq_delay_limit)),
2791 cfs_time_current())) {
2797 static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
2799 if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
2800 spin_lock(&req->rq_lock);
2801 req->rq_no_resend = 1;
2802 spin_unlock(&req->rq_lock);
2804 return req->rq_no_resend;
2808 ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
2810 int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
2812 return svcpt->scp_service->srv_watchdog_factor *
2813 max_t(int, at, obd_timeout);
2816 static inline struct ptlrpc_service *
2817 ptlrpc_req2svc(struct ptlrpc_request *req)
2819 return req->rq_rqbd->rqbd_svcpt->scp_service;
2822 /* ldlm/ldlm_lib.c */
2824 * Target client logic
2827 int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
2828 int client_obd_cleanup(struct obd_device *obddev);
2829 int client_connect_import(const struct lu_env *env,
2830 struct obd_export **exp, struct obd_device *obd,
2831 struct obd_uuid *cluuid, struct obd_connect_data *,
2833 int client_disconnect_export(struct obd_export *exp);
2834 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
2836 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
2837 int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
2838 struct obd_uuid *uuid);
2839 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
2840 void client_destroy_import(struct obd_import *imp);
2843 /* ptlrpc/pinger.c */
2845 * Pinger API (client side only)
2848 enum timeout_event {
2852 struct timeout_item;
2853 typedef int (*timeout_cb_t)(struct timeout_item *, void *);
2854 int ptlrpc_pinger_add_import(struct obd_import *imp);
2855 int ptlrpc_pinger_del_import(struct obd_import *imp);
2856 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
2857 timeout_cb_t cb, void *data,
2858 struct list_head *obd_list);
2859 int ptlrpc_del_timeout_client(struct list_head *obd_list,
2860 enum timeout_event event);
2861 struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp);
2862 int ptlrpc_obd_ping(struct obd_device *obd);
2863 void ptlrpc_pinger_ir_up(void);
2864 void ptlrpc_pinger_ir_down(void);
2866 int ptlrpc_pinger_suppress_pings(void);
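/*
 * Illustrative sketch (not part of the original header): registering a
 * periodic timeout callback with the pinger.  The 30-second interval,
 * "event", "obd_list" and "example_timeout_cb" are all assumptions.
 */
static int example_timeout_cb(struct timeout_item *item, void *data)
{
	/* invoked by the pinger when the timeout fires */
	return 0;
}

static inline int example_register_timeout(enum timeout_event event,
					   struct list_head *obd_list,
					   void *data)
{
	return ptlrpc_add_timeout_client(30, event, example_timeout_cb,
					 data, obd_list);
}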
2868 /* ptlrpc/ptlrpcd.c */
2869 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
2870 void ptlrpcd_free(struct ptlrpcd_ctl *pc);
2871 void ptlrpcd_wake(struct ptlrpc_request *req);
2872 void ptlrpcd_add_req(struct ptlrpc_request *req);
2873 int ptlrpcd_addref(void);
2874 void ptlrpcd_decref(void);
2876 /* ptlrpc/lproc_ptlrpc.c */
2878 * procfs output related functions
2881 const char *ll_opcode2str(__u32 opcode);
2882 void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
2883 void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
2884 void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
2887 /* ptlrpc/llog_client.c */
2888 extern struct llog_operations llog_client_ops;