drivers/scsi/qla2xxx/qla_bsg.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 /* BSG support for ELS/CT pass through */
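/*
 * Helper for the BSG paths below: allocate an srb from the per-HBA mempool
 * together with a zeroed srb_ctx of the requested size, tie both to the
 * given fcport and default the context to a single IOCB.  Returns NULL if
 * either allocation fails.
 */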
14 inline srb_t *
15 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16 {
17         srb_t *sp;
18         struct qla_hw_data *ha = vha->hw;
19         struct srb_ctx *ctx;
20
21         sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22         if (!sp)
23                 goto done;
24         ctx = kzalloc(size, GFP_KERNEL);
25         if (!ctx) {
26                 mempool_free(sp, ha->srb_mempool);
27                 sp = NULL;
28                 goto done;
29         }
30
31         memset(sp, 0, sizeof(*sp));
32         sp->fcport = fcport;
33         sp->ctx = ctx;
34         ctx->iocbs = 1;
35 done:
36         return sp;
37 }
38
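/*
 * Sanity-check the FCP priority configuration read from flash: an all-ones
 * first word means nothing was programmed, and the data must start with the
 * "HQOS" signature.  When flag is 1 the entries are also scanned and at
 * least one of them must have its tag-valid bit set.
 */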
39 int
40 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
41         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
42 {
43         int i, ret, num_valid;
44         uint8_t *bcode;
45         struct qla_fcp_prio_entry *pri_entry;
46         uint32_t *bcode_val_ptr, bcode_val;
47
48         ret = 1;
49         num_valid = 0;
50         bcode = (uint8_t *)pri_cfg;
51         bcode_val_ptr = (uint32_t *)pri_cfg;
52         bcode_val = (uint32_t)(*bcode_val_ptr);
53
54         if (bcode_val == 0xFFFFFFFF) {
55                 /* No FCP Priority config data in flash */
56                 ql_dbg(ql_dbg_user, vha, 0x7051,
57                     "No FCP Priority config data.\n");
58                 return 0;
59         }
60
61         if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
62                         bcode[3] != 'S') {
63                 /* Invalid FCP priority data header*/
64                 ql_dbg(ql_dbg_user, vha, 0x7052,
65                     "Invalid FCP Priority data header. bcode=0x%x.\n",
66                     bcode_val);
67                 return 0;
68         }
69         if (flag != 1)
70                 return ret;
71
72         pri_entry = &pri_cfg->entry[0];
73         for (i = 0; i < pri_cfg->num_entries; i++) {
74                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
75                         num_valid++;
76                 pri_entry++;
77         }
78
79         if (num_valid == 0) {
80                 /* No valid FCP priority data entries */
81                 ql_dbg(ql_dbg_user, vha, 0x7053,
82                     "No valid FCP Priority data entries.\n");
83                 ret = 0;
84         } else {
85                 /* FCP priority data is valid */
86                 ql_dbg(ql_dbg_user, vha, 0x7054,
87                     "Valid FCP priority data. num entries = %d.\n",
88                     num_valid);
89         }
90
91         return ret;
92 }
93
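/*
 * Vendor-specific BSG handler for the FCP priority sub-commands:
 * QLFC_FCP_PRIO_DISABLE/ENABLE toggle the feature, GET_CONFIG copies the
 * cached configuration back to the caller, and SET_CONFIG validates and
 * installs a new configuration before re-evaluating priority for all ports.
 */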
94 static int
95 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
96 {
97         struct Scsi_Host *host = bsg_job->shost;
98         scsi_qla_host_t *vha = shost_priv(host);
99         struct qla_hw_data *ha = vha->hw;
100         int ret = 0;
101         uint32_t len;
102         uint32_t oper;
103
104         bsg_job->reply->reply_payload_rcv_len = 0;
105
106         if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
107                 ret = -EINVAL;
108                 goto exit_fcp_prio_cfg;
109         }
110
111         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
112                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
113                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
114                 ret = -EBUSY;
115                 goto exit_fcp_prio_cfg;
116         }
117
118         /* Get the sub command */
119         oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
120
121         /* Only set config is allowed if config memory is not allocated */
122         if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
123                 ret = -EINVAL;
124                 goto exit_fcp_prio_cfg;
125         }
126         switch (oper) {
127         case QLFC_FCP_PRIO_DISABLE:
128                 if (ha->flags.fcp_prio_enabled) {
129                         ha->flags.fcp_prio_enabled = 0;
130                         ha->fcp_prio_cfg->attributes &=
131                                 ~FCP_PRIO_ATTR_ENABLE;
132                         qla24xx_update_all_fcp_prio(vha);
133                         bsg_job->reply->result = DID_OK;
134                 } else {
135                         ret = -EINVAL;
136                         bsg_job->reply->result = (DID_ERROR << 16);
137                         goto exit_fcp_prio_cfg;
138                 }
139                 break;
140
141         case QLFC_FCP_PRIO_ENABLE:
142                 if (!ha->flags.fcp_prio_enabled) {
143                         if (ha->fcp_prio_cfg) {
144                                 ha->flags.fcp_prio_enabled = 1;
145                                 ha->fcp_prio_cfg->attributes |=
146                                     FCP_PRIO_ATTR_ENABLE;
147                                 qla24xx_update_all_fcp_prio(vha);
148                                 bsg_job->reply->result = DID_OK;
149                         } else {
150                                 ret = -EINVAL;
151                                 bsg_job->reply->result = (DID_ERROR << 16);
152                                 goto exit_fcp_prio_cfg;
153                         }
154                 }
155                 break;
156
157         case QLFC_FCP_PRIO_GET_CONFIG:
158                 len = bsg_job->reply_payload.payload_len;
159                 if (!len || len > FCP_PRIO_CFG_SIZE) {
160                         ret = -EINVAL;
161                         bsg_job->reply->result = (DID_ERROR << 16);
162                         goto exit_fcp_prio_cfg;
163                 }
164
165                 bsg_job->reply->result = DID_OK;
166                 bsg_job->reply->reply_payload_rcv_len =
167                         sg_copy_from_buffer(
168                         bsg_job->reply_payload.sg_list,
169                         bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
170                         len);
171
172                 break;
173
174         case QLFC_FCP_PRIO_SET_CONFIG:
175                 len = bsg_job->request_payload.payload_len;
176                 if (!len || len > FCP_PRIO_CFG_SIZE) {
177                         bsg_job->reply->result = (DID_ERROR << 16);
178                         ret = -EINVAL;
179                         goto exit_fcp_prio_cfg;
180                 }
181
182                 if (!ha->fcp_prio_cfg) {
183                         ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
184                         if (!ha->fcp_prio_cfg) {
185                                 ql_log(ql_log_warn, vha, 0x7050,
186                                     "Unable to allocate memory for fcp prio "
187                                     "config data (%x).\n", FCP_PRIO_CFG_SIZE);
188                                 bsg_job->reply->result = (DID_ERROR << 16);
189                                 ret = -ENOMEM;
190                                 goto exit_fcp_prio_cfg;
191                         }
192                 }
193
194                 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
195                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
196                 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
197                         FCP_PRIO_CFG_SIZE);
198
199                 /* validate fcp priority data */
200
201                 if (!qla24xx_fcp_prio_cfg_valid(vha,
202                     (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
203                         bsg_job->reply->result = (DID_ERROR << 16);
204                         ret = -EINVAL;
205                         /* If the buffer was invalid, the
206                          * fcp_prio_cfg is of no use.
207                          */
208                         vfree(ha->fcp_prio_cfg);
209                         ha->fcp_prio_cfg = NULL;
210                         goto exit_fcp_prio_cfg;
211                 }
212
213                 ha->flags.fcp_prio_enabled = 0;
214                 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
215                         ha->flags.fcp_prio_enabled = 1;
216                 qla24xx_update_all_fcp_prio(vha);
217                 bsg_job->reply->result = DID_OK;
218                 break;
219         default:
220                 ret = -EINVAL;
221                 break;
222         }
223 exit_fcp_prio_cfg:
224         bsg_job->job_done(bsg_job);
225         return ret;
226 }
227 static int
228 qla2x00_process_els(struct fc_bsg_job *bsg_job)
229 {
230         struct fc_rport *rport;
231         fc_port_t *fcport = NULL;
232         struct Scsi_Host *host;
233         scsi_qla_host_t *vha;
234         struct qla_hw_data *ha;
235         srb_t *sp;
236         const char *type;
237         int req_sg_cnt, rsp_sg_cnt;
238         int rval =  (DRIVER_ERROR << 16);
239         uint16_t nextlid = 0;
240         struct srb_ctx *els;
241
242         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
243                 rport = bsg_job->rport;
244                 fcport = *(fc_port_t **) rport->dd_data;
245                 host = rport_to_shost(rport);
246                 vha = shost_priv(host);
247                 ha = vha->hw;
248                 type = "FC_BSG_RPT_ELS";
249         } else {
250                 host = bsg_job->shost;
251                 vha = shost_priv(host);
252                 ha = vha->hw;
253                 type = "FC_BSG_HST_ELS_NOLOGIN";
254         }
255
256         /* pass through is supported only for ISP 4Gb or higher */
257         if (!IS_FWI2_CAPABLE(ha)) {
258                 ql_dbg(ql_dbg_user, vha, 0x7001,
259                     "ELS passthru not supported for ISP23xx based adapters.\n");
260                 rval = -EPERM;
261                 goto done;
262         }
263
264         /*  Multiple SG's are not supported for ELS requests */
265         if (bsg_job->request_payload.sg_cnt > 1 ||
266                 bsg_job->reply_payload.sg_cnt > 1) {
267                 ql_dbg(ql_dbg_user, vha, 0x7002,
268                     "Multiple SG's are not supported for ELS requests, "
269                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
270                     bsg_job->request_payload.sg_cnt,
271                     bsg_job->reply_payload.sg_cnt);
272                 rval = -EPERM;
273                 goto done;
274         }
275
276         /* ELS request for rport */
277         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
278                 /* Make sure the rport is logged in;
279                  * if not, perform fabric login.
280                  */
281                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
282                         ql_dbg(ql_dbg_user, vha, 0x7003,
283                             "Failed to login port %06X for ELS passthru.\n",
284                             fcport->d_id.b24);
285                         rval = -EIO;
286                         goto done;
287                 }
288         } else {
289                 /* Allocate a dummy fcport structure, since the functions
290                  * preparing the IOCB and mailbox command retrieve port
291                  * specific information from the fcport structure. For
292                  * host-based ELS commands no fcport structure is allocated.
293                  */
294                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
295                 if (!fcport) {
296                         rval = -ENOMEM;
297                         goto done;
298                 }
299
300                 /* Initialize all required fields of fcport */
301                 fcport->vha = vha;
302                 fcport->vp_idx = vha->vp_idx;
303                 fcport->d_id.b.al_pa =
304                         bsg_job->request->rqst_data.h_els.port_id[0];
305                 fcport->d_id.b.area =
306                         bsg_job->request->rqst_data.h_els.port_id[1];
307                 fcport->d_id.b.domain =
308                         bsg_job->request->rqst_data.h_els.port_id[2];
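                /* A destination al_pa of 0xFD (the fabric controller
                 * well-known address 0xFFFFFD) gets the fabric-controller
                 * N-Port handle; any other destination is sent using the
                 * F-port handle.
                 */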
309                 fcport->loop_id =
310                         (fcport->d_id.b.al_pa == 0xFD) ?
311                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
312         }
313
314         if (!vha->flags.online) {
315                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
316                 rval = -EIO;
317                 goto done;
318         }
319
320         req_sg_cnt =
321                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
322                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
323         if (!req_sg_cnt) {
324                 rval = -ENOMEM;
325                 goto done_free_fcport;
326         }
327
328         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
329                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
330         if (!rsp_sg_cnt) {
                /* The request payload is already mapped at this point and
                 * done_free_fcport does not unmap it, so undo it here.
                 */
                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
331                 rval = -ENOMEM;
332                 goto done_free_fcport;
333         }
334
335         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
336                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
337                 ql_log(ql_log_warn, vha, 0x7008,
338                     "dma mapping resulted in different sg counts, "
339                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
340                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
341                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
342                 rval = -EAGAIN;
343                 goto done_unmap_sg;
344         }
345
346         /* Alloc SRB structure */
347         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
348         if (!sp) {
349                 rval = -ENOMEM;
350                 goto done_unmap_sg;
351         }
352
353         els = sp->ctx;
354         els->type =
355                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
356                 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
357         els->name =
358                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
359                 "bsg_els_rpt" : "bsg_els_hst");
360         els->u.bsg_job = bsg_job;
361
362         ql_dbg(ql_dbg_user, vha, 0x700a,
363             "bsg rqst type: %s els type: %x - loop-id=%x "
364             "portid=%02x%02x%02x.\n", type,
365             bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
366             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
367
368         rval = qla2x00_start_sp(sp);
369         if (rval != QLA_SUCCESS) {
370                 ql_log(ql_log_warn, vha, 0x700e,
371                     "qla2x00_start_sp failed = %d\n", rval);
372                 kfree(sp->ctx);
373                 mempool_free(sp, ha->srb_mempool);
374                 rval = -EIO;
375                 goto done_unmap_sg;
376         }
377         return rval;
378
379 done_unmap_sg:
380         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
381                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
382         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
383                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
384         goto done_free_fcport;
385
386 done_free_fcport:
387         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
388                 kfree(fcport);
389 done:
390         return rval;
391 }
392
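/*
 * A CT pass-through command IOCB carries two data-segment descriptors;
 * each group of up to five additional descriptors needs one continuation
 * IOCB, which is what the arithmetic below accounts for.
 */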
393 inline uint16_t
394 qla24xx_calc_ct_iocbs(uint16_t dsds)
395 {
396         uint16_t iocbs;
397
398         iocbs = 1;
399         if (dsds > 2) {
400                 iocbs += (dsds - 2) / 5;
401                 if ((dsds - 2) % 5)
402                         iocbs++;
403         }
404         return iocbs;
405 }
406
407 static int
408 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
409 {
410         srb_t *sp;
411         struct Scsi_Host *host = bsg_job->shost;
412         scsi_qla_host_t *vha = shost_priv(host);
413         struct qla_hw_data *ha = vha->hw;
414         int rval = (DRIVER_ERROR << 16);
415         int req_sg_cnt, rsp_sg_cnt;
416         uint16_t loop_id;
417         struct fc_port *fcport;
418         char  *type = "FC_BSG_HST_CT";
419         struct srb_ctx *ct;
420
421         req_sg_cnt =
422                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
423                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
424         if (!req_sg_cnt) {
425                 ql_log(ql_log_warn, vha, 0x700f,
426                     "dma_map_sg returned %d for request.\n", req_sg_cnt);
427                 rval = -ENOMEM;
428                 goto done;
429         }
430
431         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
432                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
433         if (!rsp_sg_cnt) {
434                 ql_log(ql_log_warn, vha, 0x7010,
435                     "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
436                 rval = -ENOMEM;
437                 goto done;
438         }
439
440         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
441             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
442                 ql_log(ql_log_warn, vha, 0x7011,
443                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
444                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
445                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
446                 rval = -EAGAIN;
447                 goto done_unmap_sg;
448         }
449
450         if (!vha->flags.online) {
451                 ql_log(ql_log_warn, vha, 0x7012,
452                     "Host is not online.\n");
453                 rval = -EIO;
454                 goto done_unmap_sg;
455         }
456
457         loop_id =
458                 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
459                         >> 24;
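        /* The routing byte taken from the CT preamble selects the
         * well-known server: 0xFC is the directory/name server (SNS),
         * 0xFA the FC management server.
         */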
460         switch (loop_id) {
461         case 0xFC:
462                 loop_id = cpu_to_le16(NPH_SNS);
463                 break;
464         case 0xFA:
465                 loop_id = vha->mgmt_svr_loop_id;
466                 break;
467         default:
468                 ql_dbg(ql_dbg_user, vha, 0x7013,
469                     "Unknown loop id: %x.\n", loop_id);
470                 rval = -EINVAL;
471                 goto done_unmap_sg;
472         }
473
474         /* Allocate a dummy fcport structure, since the functions preparing
475          * the IOCB and mailbox command retrieve port specific information
476          * from the fcport structure. For host-based CT commands no fcport
477          * structure is allocated.
478          */
479         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
480         if (!fcport) {
481                 ql_log(ql_log_warn, vha, 0x7014,
482                     "Failed to allocate fcport.\n");
483                 rval = -ENOMEM;
484                 goto done_unmap_sg;
485         }
486
487         /* Initialize all required fields of fcport */
488         fcport->vha = vha;
489         fcport->vp_idx = vha->vp_idx;
490         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
491         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
492         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
493         fcport->loop_id = loop_id;
494
495         /* Alloc SRB structure */
496         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
497         if (!sp) {
498                 ql_log(ql_log_warn, vha, 0x7015,
499                     "qla2x00_get_ctx_bsg_sp failed.\n");
500                 rval = -ENOMEM;
501                 goto done_free_fcport;
502         }
503
504         ct = sp->ctx;
505         ct->type = SRB_CT_CMD;
506         ct->name = "bsg_ct";
507         ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
508         ct->u.bsg_job = bsg_job;
509
510         ql_dbg(ql_dbg_user, vha, 0x7016,
511             "bsg rqst type: %s ct type: %x - "
512             "loop-id=%x portid=%02x%02x%02x.\n", type,
513             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
514             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
515             fcport->d_id.b.al_pa);
516
517         rval = qla2x00_start_sp(sp);
518         if (rval != QLA_SUCCESS) {
519                 ql_log(ql_log_warn, vha, 0x7017,
520                     "qla2x00_start_sp failed=%d.\n", rval);
521                 kfree(sp->ctx);
522                 mempool_free(sp, ha->srb_mempool);
523                 rval = -EIO;
524                 goto done_free_fcport;
525         }
526         return rval;
527
528 done_free_fcport:
529         kfree(fcport);
530 done_unmap_sg:
531         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
532                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
533         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
534                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
535 done:
536         return rval;
537 }
538
539 /* Set the port configuration to enable the
540  * internal loopback on ISP81XX
541  */
542 static inline int
543 qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
544     uint16_t *new_config)
545 {
546         int ret = 0;
547         int rval = 0;
548         struct qla_hw_data *ha = vha->hw;
549
550         if (!IS_QLA81XX(ha))
551                 goto done_set_internal;
552
553         new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
554         memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
555
556         ha->notify_dcbx_comp = 1;
557         ret = qla81xx_set_port_config(vha, new_config);
558         if (ret != QLA_SUCCESS) {
559                 ql_log(ql_log_warn, vha, 0x7021,
560                     "set port config failed.\n");
561                 ha->notify_dcbx_comp = 0;
562                 rval = -EINVAL;
563                 goto done_set_internal;
564         }
565
566         /* Wait for DCBX complete event */
567         if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
568                 ql_dbg(ql_dbg_user, vha, 0x7022,
569                     "State change notification not received.\n");
570         } else
571                 ql_dbg(ql_dbg_user, vha, 0x7023,
572                     "State change received.\n");
573
574         ha->notify_dcbx_comp = 0;
575
576 done_set_internal:
577         return rval;
578 }
579
580 /* Set the port configuration to disable the
581  * internal loopback on ISP81XX
582  */
583 static inline int
584 qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
585     int wait)
586 {
587         int ret = 0;
588         int rval = 0;
589         uint16_t new_config[4];
590         struct qla_hw_data *ha = vha->hw;
591
592         if (!IS_QLA81XX(ha))
593                 goto done_reset_internal;
594
595         memset(new_config, 0, sizeof(new_config));
596         if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
597                         ENABLE_INTERNAL_LOOPBACK) {
598                 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
599                 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
600
601                 ha->notify_dcbx_comp = wait;
602                 ret = qla81xx_set_port_config(vha, new_config);
603                 if (ret != QLA_SUCCESS) {
604                         ql_log(ql_log_warn, vha, 0x7025,
605                             "Set port config failed.\n");
606                         ha->notify_dcbx_comp = 0;
607                         rval = -EINVAL;
608                         goto done_reset_internal;
609                 }
610
611                 /* Wait for DCBX complete event */
612                 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
613                         (20 * HZ))) {
614                         ql_dbg(ql_dbg_user, vha, 0x7026,
615                             "State change notification not received.\n");
616                         ha->notify_dcbx_comp = 0;
617                         rval = -EINVAL;
618                         goto done_reset_internal;
619                 } else
620                         ql_dbg(ql_dbg_user, vha, 0x7027,
621                             "State change received.\n");
622
623                 ha->notify_dcbx_comp = 0;
624         }
625 done_reset_internal:
626         return rval;
627 }
628
629 static int
630 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
631 {
632         struct Scsi_Host *host = bsg_job->shost;
633         scsi_qla_host_t *vha = shost_priv(host);
634         struct qla_hw_data *ha = vha->hw;
635         int rval;
636         uint8_t command_sent;
637         char *type;
638         struct msg_echo_lb elreq;
639         uint16_t response[MAILBOX_REGISTER_COUNT];
640         uint16_t config[4], new_config[4];
641         uint8_t *fw_sts_ptr;
642         uint8_t *req_data = NULL;
643         dma_addr_t req_data_dma;
644         uint32_t req_data_len;
645         uint8_t *rsp_data = NULL;
646         dma_addr_t rsp_data_dma;
647         uint32_t rsp_data_len;
648
649         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
650                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
651                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
652                 ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
653                 return -EBUSY;
654         }
655
656         if (!vha->flags.online) {
657                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
658                 return -EIO;
659         }
660
661         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
662                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
663                 DMA_TO_DEVICE);
664
665         if (!elreq.req_sg_cnt) {
666                 ql_log(ql_log_warn, vha, 0x701a,
667                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
668                 return -ENOMEM;
669         }
670
671         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
672                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
673                 DMA_FROM_DEVICE);
674
675         if (!elreq.rsp_sg_cnt) {
676                 ql_log(ql_log_warn, vha, 0x701b,
677                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
678                 rval = -ENOMEM;
679                 goto done_unmap_req_sg;
680         }
681
682         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
683                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
684                 ql_log(ql_log_warn, vha, 0x701c,
685                     "dma mapping resulted in different sg counts, "
686                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
687                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
688                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
689                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
690                 rval = -EAGAIN;
691                 goto done_unmap_sg;
692         }
693         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
694         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
695                 &req_data_dma, GFP_KERNEL);
696         if (!req_data) {
697                 ql_log(ql_log_warn, vha, 0x701d,
698                     "dma alloc failed for req_data.\n");
699                 rval = -ENOMEM;
700                 goto done_unmap_sg;
701         }
702
703         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
704                 &rsp_data_dma, GFP_KERNEL);
705         if (!rsp_data) {
706                 ql_log(ql_log_warn, vha, 0x7004,
707                     "dma alloc failed for rsp_data.\n");
708                 rval = -ENOMEM;
709                 goto done_free_dma_req;
710         }
711
712         /* Copy the request buffer in req_data now */
713         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
714                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
715
716         elreq.send_dma = req_data_dma;
717         elreq.rcv_dma = rsp_data_dma;
718         elreq.transfer_size = req_data_len;
719
720         elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
721
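        /* Use the ECHO ELS diagnostic instead of a plugged loopback test
         * when external loopback is requested but the port is in a switched
         * (F-port) topology, the loop is down, or (on ISP81xx) the request
         * payload starts with ELS_OPCODE_BYTE and is exactly
         * MAX_ELS_FRAME_PAYLOAD long; otherwise run the real loopback path
         * below.
         */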
722         if ((ha->current_topology == ISP_CFG_F ||
723             (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
724             (IS_QLA81XX(ha) &&
725             le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
726             && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
727                 elreq.options == EXTERNAL_LOOPBACK) {
728                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
729                 ql_dbg(ql_dbg_user, vha, 0x701e,
730                     "BSG request type: %s.\n", type);
731                 command_sent = INT_DEF_LB_ECHO_CMD;
732                 rval = qla2x00_echo_test(vha, &elreq, response);
733         } else {
734                 if (IS_QLA81XX(ha)) {
735                         memset(config, 0, sizeof(config));
736                         memset(new_config, 0, sizeof(new_config));
737                         if (qla81xx_get_port_config(vha, config)) {
738                                 ql_log(ql_log_warn, vha, 0x701f,
739                                     "Get port config failed.\n");
740                                 bsg_job->reply->reply_payload_rcv_len = 0;
741                                 bsg_job->reply->result = (DID_ERROR << 16);
742                                 rval = -EPERM;
743                                 goto done_free_dma_req;
744                         }
745
746                         if (elreq.options != EXTERNAL_LOOPBACK) {
747                                 ql_dbg(ql_dbg_user, vha, 0x7020,
748                                     "Internal: current port config = %x\n",
749                                     config[0]);
750                                 if (qla81xx_set_internal_loopback(vha, config,
751                                         new_config)) {
752                                         ql_log(ql_log_warn, vha, 0x7024,
753                                             "Internal loopback failed.\n");
754                                         bsg_job->reply->reply_payload_rcv_len =
755                                                 0;
756                                         bsg_job->reply->result =
757                                                 (DID_ERROR << 16);
758                                         rval = -EPERM;
759                                         goto done_free_dma_req;
760                                 }
761                         } else {
762                                 /* For external loopback to work
763                                  * ensure internal loopback is disabled
764                                  */
765                                 if (qla81xx_reset_internal_loopback(vha,
766                                         config, 1)) {
767                                         bsg_job->reply->reply_payload_rcv_len =
768                                                 0;
769                                         bsg_job->reply->result =
770                                                 (DID_ERROR << 16);
771                                         rval = -EPERM;
772                                         goto done_free_dma_req;
773                                 }
774                         }
775
776                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
777                         ql_dbg(ql_dbg_user, vha, 0x7028,
778                             "BSG request type: %s.\n", type);
779
780                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
781                         rval = qla2x00_loopback_test(vha, &elreq, response);
782
783                         if (new_config[0]) {
784                                 /* Revert to the original port config;
785                                  * this also clears internal loopback.
786                                  */
787                                 qla81xx_reset_internal_loopback(vha,
788                                     new_config, 0);
789                         }
790
791                         if (response[0] == MBS_COMMAND_ERROR &&
792                                         response[1] == MBS_LB_RESET) {
793                                 ql_log(ql_log_warn, vha, 0x7029,
794                                     "MBX command error, Aborting ISP.\n");
795                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
796                                 qla2xxx_wake_dpc(vha);
797                                 qla2x00_wait_for_chip_reset(vha);
798                                 /* Also reset the MPI */
799                                 if (qla81xx_restart_mpi_firmware(vha) !=
800                                     QLA_SUCCESS) {
801                                         ql_log(ql_log_warn, vha, 0x702a,
802                                             "MPI reset failed.\n");
803                                 }
804
805                                 bsg_job->reply->reply_payload_rcv_len = 0;
806                                 bsg_job->reply->result = (DID_ERROR << 16);
807                                 rval = -EIO;
808                                 goto done_free_dma_req;
809                         }
810                 } else {
811                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
812                         ql_dbg(ql_dbg_user, vha, 0x702b,
813                             "BSG request type: %s.\n", type);
814                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
815                         rval = qla2x00_loopback_test(vha, &elreq, response);
816                 }
817         }
818
819         if (rval) {
820                 ql_log(ql_log_warn, vha, 0x702c,
821                     "Vendor request %s failed.\n", type);
822
823                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
824                     sizeof(struct fc_bsg_reply);
825
826                 memcpy(fw_sts_ptr, response, sizeof(response));
827                 fw_sts_ptr += sizeof(response);
828                 *fw_sts_ptr = command_sent;
829                 rval = 0;
830                 bsg_job->reply->reply_payload_rcv_len = 0;
831                 bsg_job->reply->result = (DID_ERROR << 16);
832         } else {
833                 ql_dbg(ql_dbg_user, vha, 0x702d,
834                     "Vendor request %s completed.\n", type);
835
836                 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
837                         sizeof(response) + sizeof(uint8_t);
838                 bsg_job->reply->reply_payload_rcv_len =
839                         bsg_job->reply_payload.payload_len;
840                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
841                         sizeof(struct fc_bsg_reply);
842                 memcpy(fw_sts_ptr, response, sizeof(response));
843                 fw_sts_ptr += sizeof(response);
844                 *fw_sts_ptr = command_sent;
845                 bsg_job->reply->result = DID_OK;
846                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
847                         bsg_job->reply_payload.sg_cnt, rsp_data,
848                         rsp_data_len);
849         }
850         bsg_job->job_done(bsg_job);
851
852         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
853                 rsp_data, rsp_data_dma);
854 done_free_dma_req:
855         dma_free_coherent(&ha->pdev->dev, req_data_len,
856                 req_data, req_data_dma);
857 done_unmap_sg:
858         dma_unmap_sg(&ha->pdev->dev,
859             bsg_job->reply_payload.sg_list,
860             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
861 done_unmap_req_sg:
862         dma_unmap_sg(&ha->pdev->dev,
863             bsg_job->request_payload.sg_list,
864             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
865         return rval;
866 }
867
868 static int
869 qla84xx_reset(struct fc_bsg_job *bsg_job)
870 {
871         struct Scsi_Host *host = bsg_job->shost;
872         scsi_qla_host_t *vha = shost_priv(host);
873         struct qla_hw_data *ha = vha->hw;
874         int rval = 0;
875         uint32_t flag;
876
877         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
878             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
879             test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
880                 ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
881                 return -EBUSY;
882         }
883
884         if (!IS_QLA84XX(ha)) {
885                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
886                 return -EINVAL;
887         }
888
889         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
890
891         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
892
893         if (rval) {
894                 ql_log(ql_log_warn, vha, 0x7030,
895                     "Vendor request 84xx reset failed.\n");
896                 rval = bsg_job->reply->reply_payload_rcv_len = 0;
897                 bsg_job->reply->result = (DID_ERROR << 16);
898
899         } else {
900                 ql_dbg(ql_dbg_user, vha, 0x7031,
901                     "Vendor request 84xx reset completed.\n");
902                 bsg_job->reply->result = DID_OK;
903         }
904
905         bsg_job->job_done(bsg_job);
906         return rval;
907 }
908
909 static int
910 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
911 {
912         struct Scsi_Host *host = bsg_job->shost;
913         scsi_qla_host_t *vha = shost_priv(host);
914         struct qla_hw_data *ha = vha->hw;
915         struct verify_chip_entry_84xx *mn = NULL;
916         dma_addr_t mn_dma, fw_dma;
917         void *fw_buf = NULL;
918         int rval = 0;
919         uint32_t sg_cnt;
920         uint32_t data_len;
921         uint16_t options;
922         uint32_t flag;
923         uint32_t fw_ver;
924
925         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
926                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
927                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
928                 return -EBUSY;
929
930         if (!IS_QLA84XX(ha)) {
931                 ql_dbg(ql_dbg_user, vha, 0x7032,
932                     "Not 84xx, exiting.\n");
933                 return -EINVAL;
934         }
935
936         sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
937                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
938         if (!sg_cnt) {
939                 ql_log(ql_log_warn, vha, 0x7033,
940                     "dma_map_sg returned %d for request.\n", sg_cnt);
941                 return -ENOMEM;
942         }
943
944         if (sg_cnt != bsg_job->request_payload.sg_cnt) {
945                 ql_log(ql_log_warn, vha, 0x7034,
946                     "DMA mapping resulted in different sg counts, "
947                     "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
948                     bsg_job->request_payload.sg_cnt, sg_cnt);
949                 rval = -EAGAIN;
950                 goto done_unmap_sg;
951         }
952
953         data_len = bsg_job->request_payload.payload_len;
954         fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
955                 &fw_dma, GFP_KERNEL);
956         if (!fw_buf) {
957                 ql_log(ql_log_warn, vha, 0x7035,
958                     "DMA alloc failed for fw_buf.\n");
959                 rval = -ENOMEM;
960                 goto done_unmap_sg;
961         }
962
963         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
964                 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
965
966         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
967         if (!mn) {
968                 ql_log(ql_log_warn, vha, 0x7036,
969                     "DMA alloc failed for fw buffer.\n");
970                 rval = -ENOMEM;
971                 goto done_free_fw_buf;
972         }
973
974         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
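        /* The firmware version is carried in the third 32-bit word of the
         * downloaded image.
         */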
975         fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
976
977         memset(mn, 0, sizeof(*mn));
978         mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
979         mn->entry_count = 1;
980
981         options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
982         if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
983                 options |= VCO_DIAG_FW;
984
985         mn->options = cpu_to_le16(options);
986         mn->fw_ver =  cpu_to_le32(fw_ver);
987         mn->fw_size =  cpu_to_le32(data_len);
988         mn->fw_seq_size =  cpu_to_le32(data_len);
989         mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
990         mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
991         mn->dseg_length = cpu_to_le32(data_len);
992         mn->data_seg_cnt = cpu_to_le16(1);
993
994         rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
995
996         if (rval) {
997                 ql_log(ql_log_warn, vha, 0x7037,
998                     "Vendor request 84xx updatefw failed.\n");
999
1000                 rval = bsg_job->reply->reply_payload_rcv_len = 0;
1001                 bsg_job->reply->result = (DID_ERROR << 16);
1002
1003         } else {
1004                 ql_dbg(ql_dbg_user, vha, 0x7038,
1005                     "Vendor request 84xx updatefw completed.\n");
1006
1007                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1008                 bsg_job->reply->result = DID_OK;
1009         }
1010
1011         bsg_job->job_done(bsg_job);
1012         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1013
1014 done_free_fw_buf:
1015         dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1016
1017 done_unmap_sg:
1018         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1019                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1020
1021         return rval;
1022 }
1023
1024 static int
1025 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1026 {
1027         struct Scsi_Host *host = bsg_job->shost;
1028         scsi_qla_host_t *vha = shost_priv(host);
1029         struct qla_hw_data *ha = vha->hw;
1030         struct access_chip_84xx *mn = NULL;
1031         dma_addr_t mn_dma, mgmt_dma;
1032         void *mgmt_b = NULL;
1033         int rval = 0;
1034         struct qla_bsg_a84_mgmt *ql84_mgmt;
1035         uint32_t sg_cnt;
1036         uint32_t data_len = 0;
1037         uint32_t dma_direction = DMA_NONE;
1038
1039         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1040                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1041                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1042                 ql_log(ql_log_warn, vha, 0x7039,
1043                     "Abort active or needed.\n");
1044                 return -EBUSY;
1045         }
1046
1047         if (!IS_QLA84XX(ha)) {
1048                 ql_log(ql_log_warn, vha, 0x703a,
1049                     "Not 84xx, exiting.\n");
1050                 return -EINVAL;
1051         }
1052
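        /* The vendor-specific management header follows the generic
         * fc_bsg_request structure in the BSG request buffer.
         */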
1053         ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1054                 sizeof(struct fc_bsg_request));
1055         if (!ql84_mgmt) {
1056                 ql_log(ql_log_warn, vha, 0x703b,
1057                     "MGMT header not provided, exiting.\n");
1058                 return -EINVAL;
1059         }
1060
1061         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1062         if (!mn) {
1063                 ql_log(ql_log_warn, vha, 0x703c,
1064                     "DMA alloc failed for fw buffer.\n");
1065                 return -ENOMEM;
1066         }
1067
1068         memset(mn, 0, sizeof(struct access_chip_84xx));
1069         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1070         mn->entry_count = 1;
1071
1072         switch (ql84_mgmt->mgmt.cmd) {
1073         case QLA84_MGMT_READ_MEM:
1074         case QLA84_MGMT_GET_INFO:
1075                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1076                         bsg_job->reply_payload.sg_list,
1077                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1078                 if (!sg_cnt) {
1079                         ql_log(ql_log_warn, vha, 0x703d,
1080                             "dma_map_sg returned %d for reply.\n", sg_cnt);
1081                         rval = -ENOMEM;
1082                         goto exit_mgmt;
1083                 }
1084
1085                 dma_direction = DMA_FROM_DEVICE;
1086
1087                 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1088                         ql_log(ql_log_warn, vha, 0x703e,
1089                             "DMA mapping resulted in different sg counts, "
1090                             "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1091                             bsg_job->reply_payload.sg_cnt, sg_cnt);
1092                         rval = -EAGAIN;
1093                         goto done_unmap_sg;
1094                 }
1095
1096                 data_len = bsg_job->reply_payload.payload_len;
1097
1098                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1099                     &mgmt_dma, GFP_KERNEL);
1100                 if (!mgmt_b) {
1101                         ql_log(ql_log_warn, vha, 0x703f,
1102                             "DMA alloc failed for mgmt_b.\n");
1103                         rval = -ENOMEM;
1104                         goto done_unmap_sg;
1105                 }
1106
1107                 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1108                         mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1109                         mn->parameter1 =
1110                                 cpu_to_le32(
1111                                 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1112
1113                 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1114                         mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1115                         mn->parameter1 =
1116                                 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1117
1118                         mn->parameter2 =
1119                                 cpu_to_le32(
1120                                 ql84_mgmt->mgmt.mgmtp.u.info.context);
1121                 }
1122                 break;
1123
1124         case QLA84_MGMT_WRITE_MEM:
1125                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1126                         bsg_job->request_payload.sg_list,
1127                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1128
1129                 if (!sg_cnt) {
1130                         ql_log(ql_log_warn, vha, 0x7040,
1131                             "dma_map_sg returned %d.\n", sg_cnt);
1132                         rval = -ENOMEM;
1133                         goto exit_mgmt;
1134                 }
1135
1136                 dma_direction = DMA_TO_DEVICE;
1137
1138                 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1139                         ql_log(ql_log_warn, vha, 0x7041,
1140                             "DMA mapping resulted in different sg counts, "
1141                             "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1142                             bsg_job->request_payload.sg_cnt, sg_cnt);
1143                         rval = -EAGAIN;
1144                         goto done_unmap_sg;
1145                 }
1146
1147                 data_len = bsg_job->request_payload.payload_len;
1148                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1149                         &mgmt_dma, GFP_KERNEL);
1150                 if (!mgmt_b) {
1151                         ql_log(ql_log_warn, vha, 0x7042,
1152                             "DMA alloc failed for mgmt_b.\n");
1153                         rval = -ENOMEM;
1154                         goto done_unmap_sg;
1155                 }
1156
1157                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1158                         bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1159
1160                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1161                 mn->parameter1 =
1162                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1163                 break;
1164
1165         case QLA84_MGMT_CHNG_CONFIG:
1166                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1167                 mn->parameter1 =
1168                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1169
1170                 mn->parameter2 =
1171                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1172
1173                 mn->parameter3 =
1174                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1175                 break;
1176
1177         default:
1178                 rval = -EIO;
1179                 goto exit_mgmt;
1180         }
1181
1182         if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1183                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1184                 mn->dseg_count = cpu_to_le16(1);
1185                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1186                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1187                 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1188         }
1189
1190         rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1191
1192         if (rval) {
1193                 ql_log(ql_log_warn, vha, 0x7043,
1194                     "Vendor request 84xx mgmt failed.\n");
1195
1196                 rval = bsg_job->reply->reply_payload_rcv_len = 0;
1197                 bsg_job->reply->result = (DID_ERROR << 16);
1198
1199         } else {
1200                 ql_dbg(ql_dbg_user, vha, 0x7044,
1201                     "Vendor request 84xx mgmt completed.\n");
1202
1203                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1204                 bsg_job->reply->result = DID_OK;
1205
1206                 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1207                         (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1208                         bsg_job->reply->reply_payload_rcv_len =
1209                                 bsg_job->reply_payload.payload_len;
1210
1211                         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1212                                 bsg_job->reply_payload.sg_cnt, mgmt_b,
1213                                 data_len);
1214                 }
1215         }
1216
1217         bsg_job->job_done(bsg_job);
1218
1219 done_unmap_sg:
1220         if (mgmt_b)
1221                 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1222
1223         if (dma_direction == DMA_TO_DEVICE)
1224                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1225                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1226         else if (dma_direction == DMA_FROM_DEVICE)
1227                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1228                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1229
1230 exit_mgmt:
1231         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1232
1233         return rval;
1234 }
1235
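/*
 * iiDMA BSG handler: look up the target port by WWPN, then either program
 * (port_param->mode != 0) or read back (mode == 0) the firmware port speed
 * for that remote port using mailbox commands.
 */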
1236 static int
1237 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1238 {
1239         struct Scsi_Host *host = bsg_job->shost;
1240         scsi_qla_host_t *vha = shost_priv(host);
1241         int rval = 0, found = 0;
1242         struct qla_port_param *port_param = NULL;
1243         fc_port_t *fcport = NULL;
1244         uint16_t mb[MAILBOX_REGISTER_COUNT];
1245         uint8_t *rsp_ptr = NULL;
1246
1247         bsg_job->reply->reply_payload_rcv_len = 0;
1248
1249         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1250                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1251                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1252                 ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
1253                 return -EBUSY;
1254         }
1255
1256         if (!IS_IIDMA_CAPABLE(vha->hw)) {
1257                 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1258                 return -EINVAL;
1259         }
1260
1261         port_param = (struct qla_port_param *)((char *)bsg_job->request +
1262                 sizeof(struct fc_bsg_request));
1263         if (!port_param) {
1264                 ql_log(ql_log_warn, vha, 0x7047,
1265                     "port_param header not provided.\n");
1266                 return -EINVAL;
1267         }
1268
1269         if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1270                 ql_log(ql_log_warn, vha, 0x7048,
1271                     "Invalid destination type.\n");
1272                 return -EINVAL;
1273         }
1274
1275         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1276                 if (fcport->port_type != FCT_TARGET)
1277                         continue;
1278
1279                 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1280                         fcport->port_name, sizeof(fcport->port_name)))
1281                         continue;
1282                 found = 1;
                break;
1283         }
1284
1285         if (!found) {
1286                 ql_log(ql_log_warn, vha, 0x7049,
1287                     "Failed to find port.\n");
1288                 return -EINVAL;
1289         }
1290
1291         if (atomic_read(&fcport->state) != FCS_ONLINE) {
1292                 ql_log(ql_log_warn, vha, 0x704a,
1293                     "Port is not online.\n");
1294                 return -EINVAL;
1295         }
1296
1297         if (fcport->flags & FCF_LOGIN_NEEDED) {
1298                 ql_log(ql_log_warn, vha, 0x704b,
1299                     "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1300                 return -EINVAL;
1301         }
1302
1303         if (port_param->mode)
1304                 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1305                         port_param->speed, mb);
1306         else
1307                 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1308                         &port_param->speed, mb);
1309
1310         if (rval) {
1311                 ql_log(ql_log_warn, vha, 0x704c,
1312                     "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1313                     "%04x %x %04x %04x.\n", fcport->port_name[0],
1314                     fcport->port_name[1], fcport->port_name[2],
1315                     fcport->port_name[3], fcport->port_name[4],
1316                     fcport->port_name[5], fcport->port_name[6],
1317                     fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1318                 rval = 0;
1319                 bsg_job->reply->result = (DID_ERROR << 16);
1320
1321         } else {
1322                 if (!port_param->mode) {
1323                         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1324                                 sizeof(struct qla_port_param);
1325
1326                         rsp_ptr = ((uint8_t *)bsg_job->reply) +
1327                                 sizeof(struct fc_bsg_reply);
1328
1329                         memcpy(rsp_ptr, port_param,
1330                                 sizeof(struct qla_port_param));
1331                 }
1332
1333                 bsg_job->reply->result = DID_OK;
1334         }
1335
1336         bsg_job->job_done(bsg_job);
1337         return rval;
1338 }
1339
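/*
 * Common setup for the option-ROM read/update BSG requests: validate the
 * requested flash offset, clamp the region size to the option-ROM size,
 * switch optrom_state to reading or writing and allocate the staging
 * buffer that the subsequent read or update uses.
 */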
1340 static int
1341 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1342         uint8_t is_update)
1343 {
1344         uint32_t start = 0;
1345         int valid = 0;
1346         struct qla_hw_data *ha = vha->hw;
1347
1348         bsg_job->reply->reply_payload_rcv_len = 0;
1349
1350         if (unlikely(pci_channel_offline(ha->pdev)))
1351                 return -EINVAL;
1352
1353         start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1354         if (start > ha->optrom_size) {
1355                 ql_log(ql_log_warn, vha, 0x7055,
1356                     "start %d > optrom_size %d.\n", start, ha->optrom_size);
1357                 return -EINVAL;
1358         }
1359
1360         if (ha->optrom_state != QLA_SWAITING) {
1361                 ql_log(ql_log_info, vha, 0x7056,
1362                     "optrom_state %d.\n", ha->optrom_state);
1363                 return -EBUSY;
1364         }
1365
1366         ha->optrom_region_start = start;
1367         ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1368         if (is_update) {
1369                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1370                         valid = 1;
1371                 else if (start == (ha->flt_region_boot * 4) ||
1372                     start == (ha->flt_region_fw * 4))
1373                         valid = 1;
1374                 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1375                     IS_QLA8XXX_TYPE(ha))
1376                         valid = 1;
1377                 if (!valid) {
1378                         ql_log(ql_log_warn, vha, 0x7058,
1379                             "Invalid start region 0x%x/0x%x.\n", start,
1380                             bsg_job->request_payload.payload_len);
1381                         return -EINVAL;
1382                 }
1383
1384                 ha->optrom_region_size = (start +
1385                     bsg_job->request_payload.payload_len > ha->optrom_size) ?
1386                     ha->optrom_size - start :
1387                     bsg_job->request_payload.payload_len;
1388                 ha->optrom_state = QLA_SWRITING;
1389         } else {
1390                 ha->optrom_region_size = (start +
1391                     bsg_job->reply_payload.payload_len > ha->optrom_size) ?
1392                     ha->optrom_size - start :
1393                     bsg_job->reply_payload.payload_len;
1394                 ha->optrom_state = QLA_SREADING;
1395         }
1396
1397         ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1398         if (!ha->optrom_buffer) {
1399                 ql_log(ql_log_warn, vha, 0x7059,
1400                     "Unable to allocate memory for optrom operation "
1401                     "(%x).\n", ha->optrom_region_size);
1402
1403                 ha->optrom_state = QLA_SWAITING;
1404                 return -ENOMEM;
1405         }
1406
1407         memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1408         return 0;
1409 }
1410
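/*
 * QL_VND_READ_FLASH: read the staged option-ROM region into the scratch
 * buffer and copy it back to the caller through the BSG reply payload.
 */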
1411 static int
1412 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1413 {
1414         struct Scsi_Host *host = bsg_job->shost;
1415         scsi_qla_host_t *vha = shost_priv(host);
1416         struct qla_hw_data *ha = vha->hw;
1417         int rval = 0;
1418
1419         rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1420         if (rval)
1421                 return rval;
1422
1423         ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1424             ha->optrom_region_start, ha->optrom_region_size);
1425
1426         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1427             bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1428             ha->optrom_region_size);
1429
1430         bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1431         bsg_job->reply->result = DID_OK;
1432         vfree(ha->optrom_buffer);
1433         ha->optrom_buffer = NULL;
1434         ha->optrom_state = QLA_SWAITING;
1435         bsg_job->job_done(bsg_job);
1436         return rval;
1437 }
1438
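/*
 * QL_VND_UPDATE_FLASH: copy the caller's BSG request payload into the
 * scratch buffer and write it to the staged option-ROM region.
 */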
1439 static int
1440 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1441 {
1442         struct Scsi_Host *host = bsg_job->shost;
1443         scsi_qla_host_t *vha = shost_priv(host);
1444         struct qla_hw_data *ha = vha->hw;
1445         int rval = 0;
1446
1447         rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1448         if (rval)
1449                 return rval;
1450
1451         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1452             bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1453             ha->optrom_region_size);
1454
1455         ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1456             ha->optrom_region_start, ha->optrom_region_size);
1457
1458         bsg_job->reply->result = DID_OK;
1459         vfree(ha->optrom_buffer);
1460         ha->optrom_buffer = NULL;
1461         ha->optrom_state = QLA_SWAITING;
1462         bsg_job->job_done(bsg_job);
1463         return rval;
1464 }
1465
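/*
 * QL_VND_SET_FRU_VERSION: walk the caller-supplied image version list and
 * write each entry's field info to the FRU via qla2x00_write_sfp().
 */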
1466 static int
1467 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1468 {
1469         struct Scsi_Host *host = bsg_job->shost;
1470         scsi_qla_host_t *vha = shost_priv(host);
1471         struct qla_hw_data *ha = vha->hw;
1472         int rval = 0;
1473         uint8_t bsg[DMA_POOL_SIZE];
1474         struct qla_image_version_list *list = (void *)bsg;
1475         struct qla_image_version *image;
1476         uint32_t count;
1477         dma_addr_t sfp_dma;
1478         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1479         if (!sfp) {
1480                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1481                     EXT_STATUS_NO_MEMORY;
1482                 goto done;
1483         }
1484
1485         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1486             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1487
1488         image = list->version;
1489         count = list->count;
             /* Don't let a bogus count walk the loop past the local bsg[] copy. */
             if (count > (sizeof(bsg) - sizeof(*list)) / sizeof(*image))
                     count = (sizeof(bsg) - sizeof(*list)) / sizeof(*image);
1490         while (count--) {
1491                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1492                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1493                     image->field_address.device, image->field_address.offset,
1494                     sizeof(image->field_info), image->field_address.option);
1495                 if (rval) {
1496                         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1497                             EXT_STATUS_MAILBOX;
1498                         goto dealloc;
1499                 }
1500                 image++;
1501         }
1502
1503         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1504
1505 dealloc:
1506         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1507
1508 done:
1509         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1510         bsg_job->reply->result = DID_OK << 16;
1511         bsg_job->job_done(bsg_job);
1512
1513         return 0;
1514 }
1515
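/*
 * QL_VND_READ_FRU_STATUS: read the FRU status register described by the
 * caller's field address and return it in the BSG reply payload.
 */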
1516 static int
1517 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1518 {
1519         struct Scsi_Host *host = bsg_job->shost;
1520         scsi_qla_host_t *vha = shost_priv(host);
1521         struct qla_hw_data *ha = vha->hw;
1522         int rval = 0;
1523         uint8_t bsg[DMA_POOL_SIZE];
1524         struct qla_status_reg *sr = (void *)bsg;
1525         dma_addr_t sfp_dma;
1526         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1527         if (!sfp) {
1528                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1529                     EXT_STATUS_NO_MEMORY;
1530                 goto done;
1531         }
1532
1533         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1534             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1535
1536         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1537             sr->field_address.device, sr->field_address.offset,
1538             sizeof(sr->status_reg), sr->field_address.option);
1539         sr->status_reg = *sfp;
1540
1541         if (rval) {
1542                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1543                     EXT_STATUS_MAILBOX;
1544                 goto dealloc;
1545         }
1546
1547         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1548             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1549
1550         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1551
1552 dealloc:
1553         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1554
1555 done:
1556         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1557         bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1558         bsg_job->reply->result = DID_OK << 16;
1559         bsg_job->job_done(bsg_job);
1560
1561         return 0;
1562 }
1563
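/*
 * QL_VND_WRITE_FRU_STATUS: write the caller-supplied value to the FRU
 * status register described by the field address in the request.
 */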
1564 static int
1565 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1566 {
1567         struct Scsi_Host *host = bsg_job->shost;
1568         scsi_qla_host_t *vha = shost_priv(host);
1569         struct qla_hw_data *ha = vha->hw;
1570         int rval = 0;
1571         uint8_t bsg[DMA_POOL_SIZE];
1572         struct qla_status_reg *sr = (void *)bsg;
1573         dma_addr_t sfp_dma;
1574         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1575         if (!sfp) {
1576                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1577                     EXT_STATUS_NO_MEMORY;
1578                 goto done;
1579         }
1580
1581         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1582             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1583
1584         *sfp = sr->status_reg;
1585         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1586             sr->field_address.device, sr->field_address.offset,
1587             sizeof(sr->status_reg), sr->field_address.option);
1588
1589         if (rval) {
1590                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1591                     EXT_STATUS_MAILBOX;
1592                 goto dealloc;
1593         }
1594
1595         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1596
1597 dealloc:
1598         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1599
1600 done:
1601         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1602         bsg_job->reply->result = DID_OK << 16;
1603         bsg_job->job_done(bsg_job);
1604
1605         return 0;
1606 }
1607
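/* Dispatch FC_BSG_HST_VENDOR requests to the matching vendor command handler. */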
1608 static int
1609 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1610 {
1611         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1612         case QL_VND_LOOPBACK:
1613                 return qla2x00_process_loopback(bsg_job);
1614
1615         case QL_VND_A84_RESET:
1616                 return qla84xx_reset(bsg_job);
1617
1618         case QL_VND_A84_UPDATE_FW:
1619                 return qla84xx_updatefw(bsg_job);
1620
1621         case QL_VND_A84_MGMT_CMD:
1622                 return qla84xx_mgmt_cmd(bsg_job);
1623
1624         case QL_VND_IIDMA:
1625                 return qla24xx_iidma(bsg_job);
1626
1627         case QL_VND_FCP_PRIO_CFG_CMD:
1628                 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1629
1630         case QL_VND_READ_FLASH:
1631                 return qla2x00_read_optrom(bsg_job);
1632
1633         case QL_VND_UPDATE_FLASH:
1634                 return qla2x00_update_optrom(bsg_job);
1635
1636         case QL_VND_SET_FRU_VERSION:
1637                 return qla2x00_update_fru_versions(bsg_job);
1638
1639         case QL_VND_READ_FRU_STATUS:
1640                 return qla2x00_read_fru_status(bsg_job);
1641
1642         case QL_VND_WRITE_FRU_STATUS:
1643                 return qla2x00_write_fru_status(bsg_job);
1644
1645         default:
1646                 bsg_job->reply->result = (DID_ERROR << 16);
1647                 bsg_job->job_done(bsg_job);
1648                 return -ENOSYS;
1649         }
1650 }
1651
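/*
 * Entry point for all BSG requests routed to this driver: resolve the
 * owning host/vport and dispatch on the BSG message code.
 */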
1652 int
1653 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1654 {
1655         int ret = -EINVAL;
1656         struct fc_rport *rport;
1657         fc_port_t *fcport = NULL;
1658         struct Scsi_Host *host;
1659         scsi_qla_host_t *vha;
1660
1661         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1662                 rport = bsg_job->rport;
1663                 fcport = *(fc_port_t **) rport->dd_data;
1664                 host = rport_to_shost(rport);
1665                 vha = shost_priv(host);
1666         } else {
1667                 host = bsg_job->shost;
1668                 vha = shost_priv(host);
1669         }
1670
1671         ql_dbg(ql_dbg_user, vha, 0x7000,
1672             "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
1673
1674         switch (bsg_job->request->msgcode) {
1675         case FC_BSG_RPT_ELS:
1676         case FC_BSG_HST_ELS_NOLOGIN:
1677                 ret = qla2x00_process_els(bsg_job);
1678                 break;
1679         case FC_BSG_HST_CT:
1680                 ret = qla2x00_process_ct(bsg_job);
1681                 break;
1682         case FC_BSG_HST_VENDOR:
1683                 ret = qla2x00_process_vendor_specific(bsg_job);
1684                 break;
1685         case FC_BSG_HST_ADD_RPORT:
1686         case FC_BSG_HST_DEL_RPORT:
1687         case FC_BSG_RPT_CT:
1688         default:
1689                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1690                 break;
1691         }
1692         return ret;
1693 }
1694
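/*
 * BSG timeout handler: locate the timed-out job among the outstanding
 * commands on every request queue and ask the firmware to abort it.
 */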
1695 int
1696 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1697 {
1698         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1699         struct qla_hw_data *ha = vha->hw;
1700         srb_t *sp;
1701         int cnt, que;
1702         unsigned long flags;
1703         struct req_que *req;
1704         struct srb_ctx *sp_bsg;
1705
1706         /* find the bsg job from the active list of commands */
1707         spin_lock_irqsave(&ha->hardware_lock, flags);
1708         for (que = 0; que < ha->max_req_queues; que++) {
1709                 req = ha->req_q_map[que];
1710                 if (!req)
1711                         continue;
1712
1713                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1714                         sp = req->outstanding_cmds[cnt];
1715                         if (sp) {
1716                                 sp_bsg = sp->ctx;
1717
1718                                 if (((sp_bsg->type == SRB_CT_CMD) ||
1719                                         (sp_bsg->type == SRB_ELS_CMD_HST))
1720                                         && (sp_bsg->u.bsg_job == bsg_job)) {
1721                                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1722                                         if (ha->isp_ops->abort_command(sp)) {
1723                                                 ql_log(ql_log_warn, vha, 0x7089,
1724                                                     "mbx abort_command "
1725                                                     "failed.\n");
1726                                                 bsg_job->req->errors =
1727                                                 bsg_job->reply->result = -EIO;
1728                                         } else {
1729                                                 ql_dbg(ql_dbg_user, vha, 0x708a,
1730                                                     "mbx abort_command "
1731                                                     "success.\n");
1732                                                 bsg_job->req->errors =
1733                                                 bsg_job->reply->result = 0;
1734                                         }
1735                                         spin_lock_irqsave(&ha->hardware_lock, flags);
1736                                         goto done;
1737                                 }
1738                         }
1739                 }
1740         }
1741         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1742         ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1743         bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1744         return 0;
1745
1746 done:
1747         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1748         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1749                 kfree(sp->fcport);
1750         kfree(sp->ctx);
1751         mempool_free(sp, ha->srb_mempool);
1752         return 0;
1753 }