mpt3sas: Ported WarpDrive product SSS6200 support
drivers/scsi/mpt3sas/mpt3sas_base.c
1 /*
2  * This is the Fusion MPT base driver providing common API layer interface
3  * for access to MPT (Message Passing Technology) firmware.
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6  * Copyright (C) 2012-2014  LSI Corporation
7  * Copyright (C) 2013-2014 Avago Technologies
8  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * NO WARRANTY
21  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25  * solely responsible for determining the appropriateness of using and
26  * distributing the Program and assumes all risks associated with its
27  * exercise of rights under this Agreement, including but not limited to
28  * the risks and costs of program errors, damage to or loss of data,
29  * programs or equipment, and unavailability or interruption of operations.
30
31  * DISCLAIMER OF LIABILITY
32  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40  * You should have received a copy of the GNU General Public License
41  * along with this program; if not, write to the Free Software
42  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
43  * USA.
44  */
45
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/kdev_t.h>
54 #include <linux/blkdev.h>
55 #include <linux/delay.h>
56 #include <linux/interrupt.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/io.h>
59 #include <linux/time.h>
60 #include <linux/kthread.h>
61 #include <linux/aer.h>
62
63
64 #include "mpt3sas_base.h"
65
66 static MPT_CALLBACK     mpt_callbacks[MPT_MAX_CALLBACKS];
67
68
69 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
70
71  /* maximum controller queue depth */
72 #define MAX_HBA_QUEUE_DEPTH     30000
73 #define MAX_CHAIN_DEPTH         100000
74 static int max_queue_depth = -1;
75 module_param(max_queue_depth, int, 0);
76 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
77
78 static int max_sgl_entries = -1;
79 module_param(max_sgl_entries, int, 0);
80 MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
81
82 static int msix_disable = -1;
83 module_param(msix_disable, int, 0);
84 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
85
86 static int max_msix_vectors = -1;
87 module_param(max_msix_vectors, int, 0);
88 MODULE_PARM_DESC(max_msix_vectors,
89         " max msix vectors");
90
91 static int mpt3sas_fwfault_debug;
92 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
93         " enable detection of firmware fault and halt firmware - (default=0)");
94
95 static int
96 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
97
98 /**
99  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
100  *
101  */
102 static int
103 _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
104 {
105         int ret = param_set_int(val, kp);
106         struct MPT3SAS_ADAPTER *ioc;
107
108         if (ret)
109                 return ret;
110
111         pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
112         list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
113                 ioc->fwfault_debug = mpt3sas_fwfault_debug;
114         return 0;
115 }
116 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
117         param_get_int, &mpt3sas_fwfault_debug, 0644);
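
/*
 * Writing mpt3sas_fwfault_debug at runtime goes through
 * _scsih_set_fwfault_debug() above, which pushes the new value to every
 * IOC on mpt3sas_ioc_list.  For example (assuming the standard module
 * parameter sysfs path):
 *
 *   echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 */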
118
119 /**
120  *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
121  * @arg: input argument, used to derive ioc
122  *
123  * Return 0 if the controller was removed from the pci subsystem.
124  * Return -1 otherwise.
125  */
126 static int mpt3sas_remove_dead_ioc_func(void *arg)
127 {
128         struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
129         struct pci_dev *pdev;
130
131         if (!ioc)
132                 return -1;
133
134         pdev = ioc->pdev;
135         if (!pdev)
136                 return -1;
137         pci_stop_and_remove_bus_device_locked(pdev);
138         return 0;
139 }
140
141 /**
142  * _base_fault_reset_work - workq handling ioc fault conditions
143  * @work: input argument, used to derive ioc
144  * Context: sleep.
145  *
146  * Return nothing.
147  */
148 static void
149 _base_fault_reset_work(struct work_struct *work)
150 {
151         struct MPT3SAS_ADAPTER *ioc =
152             container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
153         unsigned long    flags;
154         u32 doorbell;
155         int rc;
156         struct task_struct *p;
157
158
159         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
160         if (ioc->shost_recovery || ioc->pci_error_recovery)
161                 goto rearm_timer;
162         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
163
164         doorbell = mpt3sas_base_get_iocstate(ioc, 0);
165         if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
166                 pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
167                     ioc->name);
168
169                 /* It may be possible that EEH recovery can resolve some
170                  * PCI bus failure issues without removing the dead ioc
171                  * function, i.e. without treating the controller as
172                  * non-operational.  So priority is given to EEH recovery
173                  * first.  If it does not resolve the issue, the mpt3sas
174                  * driver will consider the controller to be in a
175                  * non-operational state and remove the dead ioc function.
176                  */
177                 if (ioc->non_operational_loop++ < 5) {
178                         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
179                                                          flags);
180                         goto rearm_timer;
181                 }
182
183                 /*
184                  * Call _scsih_flush_pending_cmds callback so that we flush all
185                  * pending commands back to OS. This call is required to avoid
186                  * deadlock at block layer. Dead IOC will fail to do diag reset,
187                  * and this call is safe since dead ioc will never return any
188                  * command back from HW.
189                  */
190                 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
191                 /*
192                  * Set remove_host flag early since kernel thread will
193                  * take some time to execute.
194                  */
195                 ioc->remove_host = 1;
196                 /* Remove the dead host */
197                 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
198                     "mpt3sas_dead_ioc_%d", ioc->id);
199                 if (IS_ERR(p))
200                         pr_err(MPT3SAS_FMT
201                         "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
202                         ioc->name, __func__);
203                 else
204                         pr_err(MPT3SAS_FMT
205                         "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
206                         ioc->name, __func__);
207                 return; /* don't rearm timer */
208         }
209
210         ioc->non_operational_loop = 0;
211
212         if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
213                 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
214                     FORCE_BIG_HAMMER);
215                 pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
216                     __func__, (rc == 0) ? "success" : "failed");
217                 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
218                 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
219                         mpt3sas_base_fault_info(ioc, doorbell &
220                             MPI2_DOORBELL_DATA_MASK);
221                 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
222                     MPI2_IOC_STATE_OPERATIONAL)
223                         return; /* don't rearm timer */
224         }
225
226         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
227  rearm_timer:
228         if (ioc->fault_reset_work_q)
229                 queue_delayed_work(ioc->fault_reset_work_q,
230                     &ioc->fault_reset_work,
231                     msecs_to_jiffies(FAULT_POLLING_INTERVAL));
232         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
233 }
234
235 /**
236  * mpt3sas_base_start_watchdog - start the fault_reset_work_q
237  * @ioc: per adapter object
238  * Context: sleep.
239  *
240  * Return nothing.
241  */
242 void
243 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
244 {
245         unsigned long    flags;
246
247         if (ioc->fault_reset_work_q)
248                 return;
249
250         /* initialize fault polling */
251
252         INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
253         snprintf(ioc->fault_reset_work_q_name,
254             sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
255         ioc->fault_reset_work_q =
256                 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
257         if (!ioc->fault_reset_work_q) {
258                 pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
259                     ioc->name, __func__, __LINE__);
260                 return;
261         }
262         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
263         if (ioc->fault_reset_work_q)
264                 queue_delayed_work(ioc->fault_reset_work_q,
265                     &ioc->fault_reset_work,
266                     msecs_to_jiffies(FAULT_POLLING_INTERVAL));
267         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
268 }
269
270 /**
271  * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
272  * @ioc: per adapter object
273  * Context: sleep.
274  *
275  * Return nothing.
276  */
277 void
278 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
279 {
280         unsigned long flags;
281         struct workqueue_struct *wq;
282
283         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
284         wq = ioc->fault_reset_work_q;
285         ioc->fault_reset_work_q = NULL;
286         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
287         if (wq) {
288                 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
289                         flush_workqueue(wq);
290                 destroy_workqueue(wq);
291         }
292 }
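
/*
 * Note on the teardown order above: fault_reset_work_q is cleared to NULL
 * under ioc_reset_in_progress_lock first, so a concurrently running
 * _base_fault_reset_work() sees the NULL pointer and does not re-arm
 * itself; only then is the delayed work cancelled and the workqueue
 * destroyed.
 */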
293
294 /**
295  * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
296  * @ioc: per adapter object
297  * @fault_code: fault code
298  *
299  * Return nothing.
300  */
301 void
302 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
303 {
304         pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
305             ioc->name, fault_code);
306 }
307
308 /**
309  * mpt3sas_halt_firmware - halt the mpt controller firmware
310  * @ioc: per adapter object
311  *
312  * For debugging timeout related issues.  Writing 0xC0FFEE00
313  * to the doorbell register halts the controller firmware. The
314  * intent is to stop both driver and firmware so that the end
315  * user can obtain a ring buffer from the controller UART.
316  */
317 void
318 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
319 {
320         u32 doorbell;
321
322         if (!ioc->fwfault_debug)
323                 return;
324
325         dump_stack();
326
327         doorbell = readl(&ioc->chip->Doorbell);
328         if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
329                 mpt3sas_base_fault_info(ioc, doorbell);
330         else {
331                 writel(0xC0FFEE00, &ioc->chip->Doorbell);
332                 pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
333                         ioc->name);
334         }
335
336         if (ioc->fwfault_debug == 2)
337                 for (;;)
338                         ;
339         else
340                 panic("panic in %s\n", __func__);
341 }
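
/*
 * As used above: a non-zero fwfault_debug other than 2 halts the firmware
 * and then panics the host, fwfault_debug == 2 halts the firmware and
 * spins forever so the state can be inspected, and 0 (the default) leaves
 * command timeouts to the normal error handling path.
 */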
342
343 /**
344  * _base_sas_ioc_info - verbose translation of the ioc status
345  * @ioc: per adapter object
346  * @mpi_reply: reply mf payload returned from firmware
347  * @request_hdr: request mf
348  *
349  * Return nothing.
350  */
351 static void
352 _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
353         MPI2RequestHeader_t *request_hdr)
354 {
355         u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
356             MPI2_IOCSTATUS_MASK;
357         char *desc = NULL;
358         u16 frame_sz;
359         char *func_str = NULL;
360
361         /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
362         if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
363             request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
364             request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
365                 return;
366
367         if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
368                 return;
369
370         switch (ioc_status) {
371
372 /****************************************************************************
373 *  Common IOCStatus values for all replies
374 ****************************************************************************/
375
376         case MPI2_IOCSTATUS_INVALID_FUNCTION:
377                 desc = "invalid function";
378                 break;
379         case MPI2_IOCSTATUS_BUSY:
380                 desc = "busy";
381                 break;
382         case MPI2_IOCSTATUS_INVALID_SGL:
383                 desc = "invalid sgl";
384                 break;
385         case MPI2_IOCSTATUS_INTERNAL_ERROR:
386                 desc = "internal error";
387                 break;
388         case MPI2_IOCSTATUS_INVALID_VPID:
389                 desc = "invalid vpid";
390                 break;
391         case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
392                 desc = "insufficient resources";
393                 break;
394         case MPI2_IOCSTATUS_INVALID_FIELD:
395                 desc = "invalid field";
396                 break;
397         case MPI2_IOCSTATUS_INVALID_STATE:
398                 desc = "invalid state";
399                 break;
400         case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
401                 desc = "op state not supported";
402                 break;
403
404 /****************************************************************************
405 *  Config IOCStatus values
406 ****************************************************************************/
407
408         case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
409                 desc = "config invalid action";
410                 break;
411         case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
412                 desc = "config invalid type";
413                 break;
414         case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
415                 desc = "config invalid page";
416                 break;
417         case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
418                 desc = "config invalid data";
419                 break;
420         case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
421                 desc = "config no defaults";
422                 break;
423         case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
424                 desc = "config cant commit";
425                 break;
426
427 /****************************************************************************
428 *  SCSI IO Reply
429 ****************************************************************************/
430
431         case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
432         case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
433         case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
434         case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
435         case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
436         case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
437         case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
438         case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
439         case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
440         case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
441         case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
442         case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
443                 break;
444
445 /****************************************************************************
446 *  For use by SCSI Initiator and SCSI Target end-to-end data protection
447 ****************************************************************************/
448
449         case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
450                 desc = "eedp guard error";
451                 break;
452         case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
453                 desc = "eedp ref tag error";
454                 break;
455         case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
456                 desc = "eedp app tag error";
457                 break;
458
459 /****************************************************************************
460 *  SCSI Target values
461 ****************************************************************************/
462
463         case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
464                 desc = "target invalid io index";
465                 break;
466         case MPI2_IOCSTATUS_TARGET_ABORTED:
467                 desc = "target aborted";
468                 break;
469         case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
470                 desc = "target no conn retryable";
471                 break;
472         case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
473                 desc = "target no connection";
474                 break;
475         case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
476                 desc = "target xfer count mismatch";
477                 break;
478         case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
479                 desc = "target data offset error";
480                 break;
481         case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
482                 desc = "target too much write data";
483                 break;
484         case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
485                 desc = "target iu too short";
486                 break;
487         case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
488                 desc = "target ack nak timeout";
489                 break;
490         case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
491                 desc = "target nak received";
492                 break;
493
494 /****************************************************************************
495 *  Serial Attached SCSI values
496 ****************************************************************************/
497
498         case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
499                 desc = "smp request failed";
500                 break;
501         case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
502                 desc = "smp data overrun";
503                 break;
504
505 /****************************************************************************
506 *  Diagnostic Buffer Post / Diagnostic Release values
507 ****************************************************************************/
508
509         case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
510                 desc = "diagnostic released";
511                 break;
512         default:
513                 break;
514         }
515
516         if (!desc)
517                 return;
518
519         switch (request_hdr->Function) {
520         case MPI2_FUNCTION_CONFIG:
521                 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
522                 func_str = "config_page";
523                 break;
524         case MPI2_FUNCTION_SCSI_TASK_MGMT:
525                 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
526                 func_str = "task_mgmt";
527                 break;
528         case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
529                 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
530                 func_str = "sas_iounit_ctl";
531                 break;
532         case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
533                 frame_sz = sizeof(Mpi2SepRequest_t);
534                 func_str = "enclosure";
535                 break;
536         case MPI2_FUNCTION_IOC_INIT:
537                 frame_sz = sizeof(Mpi2IOCInitRequest_t);
538                 func_str = "ioc_init";
539                 break;
540         case MPI2_FUNCTION_PORT_ENABLE:
541                 frame_sz = sizeof(Mpi2PortEnableRequest_t);
542                 func_str = "port_enable";
543                 break;
544         case MPI2_FUNCTION_SMP_PASSTHROUGH:
545                 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
546                 func_str = "smp_passthru";
547                 break;
548         default:
549                 frame_sz = 32;
550                 func_str = "unknown";
551                 break;
552         }
553
554         pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
555                 ioc->name, desc, ioc_status, request_hdr, func_str);
556
557         _debug_dump_mf(request_hdr, frame_sz/4);
558 }
559
560 /**
561  * _base_display_event_data - verbose translation of firmware async events
562  * @ioc: per adapter object
563  * @mpi_reply: reply mf payload returned from firmware
564  *
565  * Return nothing.
566  */
567 static void
568 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
569         Mpi2EventNotificationReply_t *mpi_reply)
570 {
571         char *desc = NULL;
572         u16 event;
573
574         if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
575                 return;
576
577         event = le16_to_cpu(mpi_reply->Event);
578
579         switch (event) {
580         case MPI2_EVENT_LOG_DATA:
581                 desc = "Log Data";
582                 break;
583         case MPI2_EVENT_STATE_CHANGE:
584                 desc = "Status Change";
585                 break;
586         case MPI2_EVENT_HARD_RESET_RECEIVED:
587                 desc = "Hard Reset Received";
588                 break;
589         case MPI2_EVENT_EVENT_CHANGE:
590                 desc = "Event Change";
591                 break;
592         case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
593                 desc = "Device Status Change";
594                 break;
595         case MPI2_EVENT_IR_OPERATION_STATUS:
596                 if (!ioc->hide_ir_msg)
597                         desc = "IR Operation Status";
598                 break;
599         case MPI2_EVENT_SAS_DISCOVERY:
600         {
601                 Mpi2EventDataSasDiscovery_t *event_data =
602                     (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
603                 pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
604                     (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
605                     "start" : "stop");
606                 if (event_data->DiscoveryStatus)
607                         pr_info("discovery_status(0x%08x)",
608                             le32_to_cpu(event_data->DiscoveryStatus));
609                 pr_info("\n");
610                 return;
611         }
612         case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
613                 desc = "SAS Broadcast Primitive";
614                 break;
615         case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
616                 desc = "SAS Init Device Status Change";
617                 break;
618         case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
619                 desc = "SAS Init Table Overflow";
620                 break;
621         case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
622                 desc = "SAS Topology Change List";
623                 break;
624         case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
625                 desc = "SAS Enclosure Device Status Change";
626                 break;
627         case MPI2_EVENT_IR_VOLUME:
628                 if (!ioc->hide_ir_msg)
629                         desc = "IR Volume";
630                 break;
631         case MPI2_EVENT_IR_PHYSICAL_DISK:
632                 if (!ioc->hide_ir_msg)
633                         desc = "IR Physical Disk";
634                 break;
635         case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
636                 if (!ioc->hide_ir_msg)
637                         desc = "IR Configuration Change List";
638                 break;
639         case MPI2_EVENT_LOG_ENTRY_ADDED:
640                 if (!ioc->hide_ir_msg)
641                         desc = "Log Entry Added";
642                 break;
643         case MPI2_EVENT_TEMP_THRESHOLD:
644                 desc = "Temperature Threshold";
645                 break;
646         }
647
648         if (!desc)
649                 return;
650
651         pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
652 }
653
654 /**
655  * _base_sas_log_info - verbose translation of firmware log info
656  * @ioc: per adapter object
657  * @log_info: log info
658  *
659  * Return nothing.
660  */
661 static void
662 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
663 {
664         union loginfo_type {
665                 u32     loginfo;
666                 struct {
667                         u32     subcode:16;
668                         u32     code:8;
669                         u32     originator:4;
670                         u32     bus_type:4;
671                 } dw;
672         };
673         union loginfo_type sas_loginfo;
674         char *originator_str = NULL;
675
676         sas_loginfo.loginfo = log_info;
677         if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
678                 return;
679
680         /* each nexus loss loginfo */
681         if (log_info == 0x31170000)
682                 return;
683
684         /* eat the loginfos associated with task aborts */
685         if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
686             0x31140000 || log_info == 0x31130000))
687                 return;
688
689         switch (sas_loginfo.dw.originator) {
690         case 0:
691                 originator_str = "IOP";
692                 break;
693         case 1:
694                 originator_str = "PL";
695                 break;
696         case 2:
697                 if (!ioc->hide_ir_msg)
698                         originator_str = "IR";
699                 else
700                         originator_str = "WarpDrive";
701                 break;
702         }
703
704         pr_warn(MPT3SAS_FMT
705                 "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
706                 ioc->name, log_info,
707              originator_str, sas_loginfo.dw.code,
708              sas_loginfo.dw.subcode);
709 }
710
711 /**
712  * _base_display_reply_info - verbose translation of a reply's ioc_status and loginfo
713  * @ioc: per adapter object
714  * @smid: system request message index
715  * @msix_index: MSIX table index supplied by the OS
716  * @reply: reply message frame(lower 32bit addr)
717  *
718  * Return nothing.
719  */
720 static void
721 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
722         u32 reply)
723 {
724         MPI2DefaultReply_t *mpi_reply;
725         u16 ioc_status;
726         u32 loginfo = 0;
727
728         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
729         if (unlikely(!mpi_reply)) {
730                 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
731                     ioc->name, __FILE__, __LINE__, __func__);
732                 return;
733         }
734         ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
735
736         if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
737             (ioc->logging_level & MPT_DEBUG_REPLY)) {
738                 _base_sas_ioc_info(ioc, mpi_reply,
739                    mpt3sas_base_get_msg_frame(ioc, smid));
740         }
741
742         if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
743                 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
744                 _base_sas_log_info(ioc, loginfo);
745         }
746
747         if (ioc_status || loginfo) {
748                 ioc_status &= MPI2_IOCSTATUS_MASK;
749                 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
750         }
751 }
752
753 /**
754  * mpt3sas_base_done - base internal command completion routine
755  * @ioc: per adapter object
756  * @smid: system request message index
757  * @msix_index: MSIX table index supplied by the OS
758  * @reply: reply message frame(lower 32bit addr)
759  *
760  * Return 1 meaning mf should be freed from _base_interrupt
761  *        0 means the mf is freed from this function.
762  */
763 u8
764 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
765         u32 reply)
766 {
767         MPI2DefaultReply_t *mpi_reply;
768
769         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
770         if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
771                 return 1;
772
773         if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
774                 return 1;
775
776         ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
777         if (mpi_reply) {
778                 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
779                 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
780         }
781         ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
782
783         complete(&ioc->base_cmds.done);
784         return 1;
785 }
786
787 /**
788  * _base_async_event - main callback handler for firmware async events
789  * @ioc: per adapter object
790  * @msix_index: MSIX table index supplied by the OS
791  * @reply: reply message frame(lower 32bit addr)
792  *
793  * Return 1 meaning mf should be freed from _base_interrupt
794  *        0 means the mf is freed from this function.
795  */
796 static u8
797 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
798 {
799         Mpi2EventNotificationReply_t *mpi_reply;
800         Mpi2EventAckRequest_t *ack_request;
801         u16 smid;
802
803         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
804         if (!mpi_reply)
805                 return 1;
806         if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
807                 return 1;
808
809         _base_display_event_data(ioc, mpi_reply);
810
811         if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
812                 goto out;
813         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
814         if (!smid) {
815                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
816                     ioc->name, __func__);
817                 goto out;
818         }
819
820         ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
821         memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
822         ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
823         ack_request->Event = mpi_reply->Event;
824         ack_request->EventContext = mpi_reply->EventContext;
825         ack_request->VF_ID = 0;  /* TODO */
826         ack_request->VP_ID = 0;
827         mpt3sas_base_put_smid_default(ioc, smid);
828
829  out:
830
831         /* scsih callback handler */
832         mpt3sas_scsih_event_callback(ioc, msix_index, reply);
833
834         /* ctl callback handler */
835         mpt3sas_ctl_event_callback(ioc, msix_index, reply);
836
837         return 1;
838 }
839
840 /**
841  * _base_get_cb_idx - obtain the callback index
842  * @ioc: per adapter object
843  * @smid: system request message index
844  *
845  * Return callback index.
846  */
847 static u8
848 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
849 {
850         int i;
851         u8 cb_idx;
852
853         if (smid < ioc->hi_priority_smid) {
854                 i = smid - 1;
855                 cb_idx = ioc->scsi_lookup[i].cb_idx;
856         } else if (smid < ioc->internal_smid) {
857                 i = smid - ioc->hi_priority_smid;
858                 cb_idx = ioc->hpr_lookup[i].cb_idx;
859         } else if (smid <= ioc->hba_queue_depth) {
860                 i = smid - ioc->internal_smid;
861                 cb_idx = ioc->internal_lookup[i].cb_idx;
862         } else
863                 cb_idx = 0xFF;
864         return cb_idx;
865 }
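
/*
 * The smid comparisons above partition the request pool roughly as:
 *
 *   [1 .. hi_priority_smid)              -> scsi_lookup[]     (SCSI IO)
 *   [hi_priority_smid .. internal_smid)  -> hpr_lookup[]      (high priority)
 *   [internal_smid .. hba_queue_depth]   -> internal_lookup[] (internal cmds)
 *
 * Anything outside these ranges yields the invalid index 0xFF.
 */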
866
867 /**
868  * _base_mask_interrupts - disable interrupts
869  * @ioc: per adapter object
870  *
871  * Disabling ResetIRQ, Reply and Doorbell Interrupts
872  *
873  * Return nothing.
874  */
875 static void
876 _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
877 {
878         u32 him_register;
879
880         ioc->mask_interrupts = 1;
881         him_register = readl(&ioc->chip->HostInterruptMask);
882         him_register |= MPI2_HIM_DIM | MPI2_HIM_RIM | MPI2_HIM_RESET_IRQ_MASK;
883         writel(him_register, &ioc->chip->HostInterruptMask);
884         readl(&ioc->chip->HostInterruptMask);
885 }
886
887 /**
888  * _base_unmask_interrupts - enable interrupts
889  * @ioc: per adapter object
890  *
891  * Enabling only Reply Interrupts
892  *
893  * Return nothing.
894  */
895 static void
896 _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
897 {
898         u32 him_register;
899
900         him_register = readl(&ioc->chip->HostInterruptMask);
901         him_register &= ~MPI2_HIM_RIM;
902         writel(him_register, &ioc->chip->HostInterruptMask);
903         ioc->mask_interrupts = 0;
904 }
905
906 union reply_descriptor {
907         u64 word;
908         struct {
909                 u32 low;
910                 u32 high;
911         } u;
912 };
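
/*
 * A reply descriptor is a single 64 bit word; reading it as two 32 bit
 * halves lets _base_interrupt() below spot an entry the IOC has not yet
 * written, since unused entries stay all ones (both halves == UINT_MAX)
 * and are re-initialized to ULLONG_MAX after processing.
 */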
913
914 /**
915  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
916  * @irq: irq number (not used)
917  * @bus_id: bus identifier cookie == pointer to the reply queue
918  *          (struct adapter_reply_queue) registered for this vector
919  *
920  * Return IRQ_HANDLED if processed, else IRQ_NONE.
921  */
922 static irqreturn_t
923 _base_interrupt(int irq, void *bus_id)
924 {
925         struct adapter_reply_queue *reply_q = bus_id;
926         union reply_descriptor rd;
927         u32 completed_cmds;
928         u8 request_desript_type;
929         u16 smid;
930         u8 cb_idx;
931         u32 reply;
932         u8 msix_index = reply_q->msix_index;
933         struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
934         Mpi2ReplyDescriptorsUnion_t *rpf;
935         u8 rc;
936
937         if (ioc->mask_interrupts)
938                 return IRQ_NONE;
939
940         if (!atomic_add_unless(&reply_q->busy, 1, 1))
941                 return IRQ_NONE;
942
943         rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
944         request_desript_type = rpf->Default.ReplyFlags
945              & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
946         if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
947                 atomic_dec(&reply_q->busy);
948                 return IRQ_NONE;
949         }
950
951         completed_cmds = 0;
952         cb_idx = 0xFF;
953         do {
954                 rd.word = le64_to_cpu(rpf->Words);
955                 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
956                         goto out;
957                 reply = 0;
958                 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
959                 if (request_desript_type ==
960                     MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
961                     request_desript_type ==
962                     MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
963                         cb_idx = _base_get_cb_idx(ioc, smid);
964                         if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
965                             (likely(mpt_callbacks[cb_idx] != NULL))) {
966                                 rc = mpt_callbacks[cb_idx](ioc, smid,
967                                     msix_index, 0);
968                                 if (rc)
969                                         mpt3sas_base_free_smid(ioc, smid);
970                         }
971                 } else if (request_desript_type ==
972                     MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
973                         reply = le32_to_cpu(
974                             rpf->AddressReply.ReplyFrameAddress);
975                         if (reply > ioc->reply_dma_max_address ||
976                             reply < ioc->reply_dma_min_address)
977                                 reply = 0;
978                         if (smid) {
979                                 cb_idx = _base_get_cb_idx(ioc, smid);
980                                 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
981                                     (likely(mpt_callbacks[cb_idx] != NULL))) {
982                                         rc = mpt_callbacks[cb_idx](ioc, smid,
983                                             msix_index, reply);
984                                         if (reply)
985                                                 _base_display_reply_info(ioc,
986                                                     smid, msix_index, reply);
987                                         if (rc)
988                                                 mpt3sas_base_free_smid(ioc,
989                                                     smid);
990                                 }
991                         } else {
992                                 _base_async_event(ioc, msix_index, reply);
993                         }
994
995                         /* reply free queue handling */
996                         if (reply) {
997                                 ioc->reply_free_host_index =
998                                     (ioc->reply_free_host_index ==
999                                     (ioc->reply_free_queue_depth - 1)) ?
1000                                     0 : ioc->reply_free_host_index + 1;
1001                                 ioc->reply_free[ioc->reply_free_host_index] =
1002                                     cpu_to_le32(reply);
1003                                 wmb();
1004                                 writel(ioc->reply_free_host_index,
1005                                     &ioc->chip->ReplyFreeHostIndex);
1006                         }
1007                 }
1008
1009                 rpf->Words = cpu_to_le64(ULLONG_MAX);
1010                 reply_q->reply_post_host_index =
1011                     (reply_q->reply_post_host_index ==
1012                     (ioc->reply_post_queue_depth - 1)) ? 0 :
1013                     reply_q->reply_post_host_index + 1;
1014                 request_desript_type =
1015                     reply_q->reply_post_free[reply_q->reply_post_host_index].
1016                     Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1017                 completed_cmds++;
1018                 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1019                         goto out;
1020                 if (!reply_q->reply_post_host_index)
1021                         rpf = reply_q->reply_post_free;
1022                 else
1023                         rpf++;
1024         } while (1);
1025
1026  out:
1027
1028         if (!completed_cmds) {
1029                 atomic_dec(&reply_q->busy);
1030                 return IRQ_NONE;
1031         }
1032
1033         wmb();
1034         if (ioc->is_warpdrive) {
1035                 writel(reply_q->reply_post_host_index,
1036                     ioc->reply_post_host_index[msix_index]);
1037                 atomic_dec(&reply_q->busy);
1038                 return IRQ_HANDLED;
1039         }
1040
1041         /* Update Reply Post Host Index.
1042          * For those HBA's which support combined reply queue feature
1043          * 1. Get the correct Supplemental Reply Post Host Index Register.
1044          *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1045          *    Index Register address bank i.e replyPostRegisterIndex[],
1046          * 2. Then update this register with new reply host index value
1047          *    in ReplyPostIndex field and the MSIxIndex field with
1048          *    msix_index value reduced to a value between 0 and 7,
1049          *    using a modulo 8 operation. Since each Supplemental Reply Post
1050          *    Host Index Register supports 8 MSI-X vectors.
1051          *
1052          * For other HBA's just update the Reply Post Host Index register with
1053          * new reply host index value in ReplyPostIndex Field and msix_index
1054          * value in MSIxIndex field.
1055          */
1056         if (ioc->msix96_vector)
1057                 writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1058                         MPI2_RPHI_MSIX_INDEX_SHIFT),
1059                         ioc->replyPostRegisterIndex[msix_index/8]);
1060         else
1061                 writel(reply_q->reply_post_host_index | (msix_index <<
1062                         MPI2_RPHI_MSIX_INDEX_SHIFT),
1063                         &ioc->chip->ReplyPostHostIndex);
1064         atomic_dec(&reply_q->busy);
1065         return IRQ_HANDLED;
1066 }
1067
1068 /**
1069  * _base_is_controller_msix_enabled - does the controller support multi reply queues
1070  * @ioc: per adapter object
1071  *
1072  */
1073 static inline int
1074 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1075 {
1076         return (ioc->facts.IOCCapabilities &
1077             MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1078 }
1079
1080 /**
1081  * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
1082  * @ioc: per adapter object
1083  * Context: ISR context
1084  *
1085  * Called when a Task Management request has completed.  We want
1086  * to flush the other reply queues so that all outstanding IO has been
1087  * completed back to the OS before we process the TM completion.
1088  *
1089  * Return nothing.
1090  */
1091 void
1092 mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1093 {
1094         struct adapter_reply_queue *reply_q;
1095
1096         /* If MSIX capability is turned off
1097          * then multi-queues are not enabled
1098          */
1099         if (!_base_is_controller_msix_enabled(ioc))
1100                 return;
1101
1102         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1103                 if (ioc->shost_recovery)
1104                         return;
1105                 /* TMs are on msix_index == 0 */
1106                 if (reply_q->msix_index == 0)
1107                         continue;
1108                 _base_interrupt(reply_q->vector, (void *)reply_q);
1109         }
1110 }
1111
1112 /**
1113  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1114  * @cb_idx: callback index
1115  *
1116  * Return nothing.
1117  */
1118 void
1119 mpt3sas_base_release_callback_handler(u8 cb_idx)
1120 {
1121         mpt_callbacks[cb_idx] = NULL;
1122 }
1123
1124 /**
1125  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1126  * @cb_func: callback function
1127  *
1128  * Returns the cb_idx at which cb_func was registered.
1129  */
1130 u8
1131 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1132 {
1133         u8 cb_idx;
1134
1135         for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1136                 if (mpt_callbacks[cb_idx] == NULL)
1137                         break;
1138
1139         mpt_callbacks[cb_idx] = cb_func;
1140         return cb_idx;
1141 }
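
/*
 * Callers store the returned index and pass it when obtaining smids so
 * that _base_get_cb_idx()/_base_interrupt() can route completions back to
 * them, e.g. (sketch of the pattern used elsewhere in the driver):
 *
 *   base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
 */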
1142
1143 /**
1144  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1145  *
1146  * Return nothing.
1147  */
1148 void
1149 mpt3sas_base_initialize_callback_handler(void)
1150 {
1151         u8 cb_idx;
1152
1153         for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1154                 mpt3sas_base_release_callback_handler(cb_idx);
1155 }
1156
1157
1158 /**
1159  * _base_build_zero_len_sge - build zero length sg entry
1160  * @ioc: per adapter object
1161  * @paddr: virtual address for SGE
1162  *
1163  * Create a zero length scatter gather entry to ensure the IOC's hardware has
1164  * something to use if the target device goes brain dead and tries
1165  * to send data even when none is asked for.
1166  *
1167  * Return nothing.
1168  */
1169 static void
1170 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1171 {
1172         u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1173             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1174             MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1175             MPI2_SGE_FLAGS_SHIFT);
1176         ioc->base_add_sg_single(paddr, flags_length, -1);
1177 }
1178
1179 /**
1180  * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1181  * @paddr: virtual address for SGE
1182  * @flags_length: SGE flags and data transfer length
1183  * @dma_addr: Physical address
1184  *
1185  * Return nothing.
1186  */
1187 static void
1188 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1189 {
1190         Mpi2SGESimple32_t *sgel = paddr;
1191
1192         flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1193             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1194         sgel->FlagsLength = cpu_to_le32(flags_length);
1195         sgel->Address = cpu_to_le32(dma_addr);
1196 }
1197
1198
1199 /**
1200  * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1201  * @paddr: virtual address for SGE
1202  * @flags_length: SGE flags and data transfer length
1203  * @dma_addr: Physical address
1204  *
1205  * Return nothing.
1206  */
1207 static void
1208 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1209 {
1210         Mpi2SGESimple64_t *sgel = paddr;
1211
1212         flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1213             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1214         sgel->FlagsLength = cpu_to_le32(flags_length);
1215         sgel->Address = cpu_to_le64(dma_addr);
1216 }
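
/*
 * In both helpers above, FlagsLength packs the SGE flags into the top
 * byte (shifted by MPI2_SGE_FLAGS_SHIFT) and the transfer length into the
 * remaining low-order bits, which is why callers OR a pre-shifted flags
 * value with the raw length.
 */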
1217
1218 /**
1219  * _base_get_chain_buffer_tracker - obtain chain tracker
1220  * @ioc: per adapter object
1221  * @smid: smid associated to an IO request
1222  *
1223  * Returns chain tracker(from ioc->free_chain_list)
1224  */
1225 static struct chain_tracker *
1226 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1227 {
1228         struct chain_tracker *chain_req;
1229         unsigned long flags;
1230
1231         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1232         if (list_empty(&ioc->free_chain_list)) {
1233                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1234                 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1235                         "chain buffers not available\n", ioc->name));
1236                 return NULL;
1237         }
1238         chain_req = list_entry(ioc->free_chain_list.next,
1239             struct chain_tracker, tracker_list);
1240         list_del_init(&chain_req->tracker_list);
1241         list_add_tail(&chain_req->tracker_list,
1242             &ioc->scsi_lookup[smid - 1].chain_list);
1243         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1244         return chain_req;
1245 }
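
/*
 * The tracker is moved from free_chain_list onto the per-smid chain_list
 * here; it is expected to be returned to free_chain_list when the smid is
 * freed, so a chain buffer is only tied up for the lifetime of one IO.
 */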
1246
1247
1248 /**
1249  * _base_build_sg - build generic sg
1250  * @ioc: per adapter object
1251  * @psge: virtual address for SGE
1252  * @data_out_dma: physical address for WRITES
1253  * @data_out_sz: data xfer size for WRITES
1254  * @data_in_dma: physical address for READS
1255  * @data_in_sz: data xfer size for READS
1256  *
1257  * Return nothing.
1258  */
1259 static void
1260 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1261         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1262         size_t data_in_sz)
1263 {
1264         u32 sgl_flags;
1265
1266         if (!data_out_sz && !data_in_sz) {
1267                 _base_build_zero_len_sge(ioc, psge);
1268                 return;
1269         }
1270
1271         if (data_out_sz && data_in_sz) {
1272                 /* WRITE sgel first */
1273                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1274                     MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1275                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1276                 ioc->base_add_sg_single(psge, sgl_flags |
1277                     data_out_sz, data_out_dma);
1278
1279                 /* incr sgel */
1280                 psge += ioc->sge_size;
1281
1282                 /* READ sgel last */
1283                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1284                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1285                     MPI2_SGE_FLAGS_END_OF_LIST);
1286                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1287                 ioc->base_add_sg_single(psge, sgl_flags |
1288                     data_in_sz, data_in_dma);
1289         } else if (data_out_sz) /* WRITE */ {
1290                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1291                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1292                     MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1293                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1294                 ioc->base_add_sg_single(psge, sgl_flags |
1295                     data_out_sz, data_out_dma);
1296         } else if (data_in_sz) /* READ */ {
1297                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1298                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1299                     MPI2_SGE_FLAGS_END_OF_LIST);
1300                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1301                 ioc->base_add_sg_single(psge, sgl_flags |
1302                     data_in_sz, data_in_dma);
1303         }
1304 }
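
/*
 * _base_build_sg() handles at most one contiguous data-out and one
 * data-in buffer and never allocates a chain, so the resulting SGL is
 * either a zero length entry or one/two simple elements built directly
 * in the request frame.
 */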
1305
1306 /* IEEE format sgls */
1307
1308 /**
1309  * _base_add_sg_single_ieee - add sg element for IEEE format
1310  * @paddr: virtual address for SGE
1311  * @flags: SGE flags
1312  * @chain_offset: number of 128 byte elements from start of segment
1313  * @length: data transfer length
1314  * @dma_addr: Physical address
1315  *
1316  * Return nothing.
1317  */
1318 static void
1319 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
1320         dma_addr_t dma_addr)
1321 {
1322         Mpi25IeeeSgeChain64_t *sgel = paddr;
1323
1324         sgel->Flags = flags;
1325         sgel->NextChainOffset = chain_offset;
1326         sgel->Length = cpu_to_le32(length);
1327         sgel->Address = cpu_to_le64(dma_addr);
1328 }
1329
1330 /**
1331  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1332  * @ioc: per adapter object
1333  * @paddr: virtual address for SGE
1334  *
1335  * Create a zero length scatter gather entry to ensure the IOC's hardware has
1336  * something to use if the target device goes brain dead and tries
1337  * to send data even when none is asked for.
1338  *
1339  * Return nothing.
1340  */
1341 static void
1342 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1343 {
1344         u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1345                 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1346                 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
1347         _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
1348 }
1349
1350 /**
1351  * _base_build_sg_scmd - main sg creation routine
1352  * @ioc: per adapter object
1353  * @scmd: scsi command
1354  * @smid: system request message index
1355  * Context: none.
1356  *
1357  * The main routine that builds scatter gather table from a given
1358  * scsi request sent via the .queuecommand main handler.
1359  *
1360  * Returns 0 success, anything else error
1361  */
1362 static int
1363 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1364                 struct scsi_cmnd *scmd, u16 smid)
1365 {
1366         Mpi2SCSIIORequest_t *mpi_request;
1367         dma_addr_t chain_dma;
1368         struct scatterlist *sg_scmd;
1369         void *sg_local, *chain;
1370         u32 chain_offset;
1371         u32 chain_length;
1372         u32 chain_flags;
1373         int sges_left;
1374         u32 sges_in_segment;
1375         u32 sgl_flags;
1376         u32 sgl_flags_last_element;
1377         u32 sgl_flags_end_buffer;
1378         struct chain_tracker *chain_req;
1379
1380         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1381
1382         /* init scatter gather flags */
1383         sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
1384         if (scmd->sc_data_direction == DMA_TO_DEVICE)
1385                 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
1386         sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
1387             << MPI2_SGE_FLAGS_SHIFT;
1388         sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
1389             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
1390             << MPI2_SGE_FLAGS_SHIFT;
1391         sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1392
1393         sg_scmd = scsi_sglist(scmd);
1394         sges_left = scsi_dma_map(scmd);
1395         if (sges_left < 0) {
1396                 sdev_printk(KERN_ERR, scmd->device,
1397                  "pci_map_sg failed: request for %d bytes!\n",
1398                  scsi_bufflen(scmd));
1399                 return -ENOMEM;
1400         }
1401
1402         sg_local = &mpi_request->SGL;
1403         sges_in_segment = ioc->max_sges_in_main_message;
1404         if (sges_left <= sges_in_segment)
1405                 goto fill_in_last_segment;
1406
1407         mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
1408             (sges_in_segment * ioc->sge_size))/4;
1409
1410         /* fill in main message segment when there is a chain following */
1411         while (sges_in_segment) {
1412                 if (sges_in_segment == 1)
1413                         ioc->base_add_sg_single(sg_local,
1414                             sgl_flags_last_element | sg_dma_len(sg_scmd),
1415                             sg_dma_address(sg_scmd));
1416                 else
1417                         ioc->base_add_sg_single(sg_local, sgl_flags |
1418                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1419                 sg_scmd = sg_next(sg_scmd);
1420                 sg_local += ioc->sge_size;
1421                 sges_left--;
1422                 sges_in_segment--;
1423         }
1424
1425         /* initializing the chain flags and pointers */
1426         chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
1427         chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1428         if (!chain_req)
1429                 return -1;
1430         chain = chain_req->chain_buffer;
1431         chain_dma = chain_req->chain_buffer_dma;
1432         do {
1433                 sges_in_segment = (sges_left <=
1434                     ioc->max_sges_in_chain_message) ? sges_left :
1435                     ioc->max_sges_in_chain_message;
1436                 chain_offset = (sges_left == sges_in_segment) ?
1437                     0 : (sges_in_segment * ioc->sge_size)/4;
1438                 chain_length = sges_in_segment * ioc->sge_size;
1439                 if (chain_offset) {
1440                         chain_offset = chain_offset <<
1441                             MPI2_SGE_CHAIN_OFFSET_SHIFT;
1442                         chain_length += ioc->sge_size;
1443                 }
1444                 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
1445                     chain_length, chain_dma);
1446                 sg_local = chain;
1447                 if (!chain_offset)
1448                         goto fill_in_last_segment;
1449
1450                 /* fill in chain segments */
1451                 while (sges_in_segment) {
1452                         if (sges_in_segment == 1)
1453                                 ioc->base_add_sg_single(sg_local,
1454                                     sgl_flags_last_element |
1455                                     sg_dma_len(sg_scmd),
1456                                     sg_dma_address(sg_scmd));
1457                         else
1458                                 ioc->base_add_sg_single(sg_local, sgl_flags |
1459                                     sg_dma_len(sg_scmd),
1460                                     sg_dma_address(sg_scmd));
1461                         sg_scmd = sg_next(sg_scmd);
1462                         sg_local += ioc->sge_size;
1463                         sges_left--;
1464                         sges_in_segment--;
1465                 }
1466
1467                 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1468                 if (!chain_req)
1469                         return -1;
1470                 chain = chain_req->chain_buffer;
1471                 chain_dma = chain_req->chain_buffer_dma;
1472         } while (1);
1473
1474
1475  fill_in_last_segment:
1476
1477         /* fill the last segment */
1478         while (sges_left) {
1479                 if (sges_left == 1)
1480                         ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
1481                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1482                 else
1483                         ioc->base_add_sg_single(sg_local, sgl_flags |
1484                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1485                 sg_scmd = sg_next(sg_scmd);
1486                 sg_local += ioc->sge_size;
1487                 sges_left--;
1488         }
1489
1490         return 0;
1491 }
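
/*
 * Editor's note: an illustrative sketch of the MPI2 SGE encoding used
 * above, with hypothetical values (not driver code).  The 32-bit
 * FlagsLength word keeps the flags in the top byte (shifted up by
 * MPI2_SGE_FLAGS_SHIFT) and the segment length in the lower bits, which
 * is why the code simply ORs "sgl_flags | sg_dma_len(sg_scmd)":
 *
 *	u32 flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 *		    MPI2_SGE_FLAGS_LAST_ELEMENT |
 *		    MPI2_SGE_FLAGS_END_OF_BUFFER |
 *		    MPI2_SGE_FLAGS_END_OF_LIST;
 *	u32 flags_length = (flags << MPI2_SGE_FLAGS_SHIFT) | 0x1000;
 *
 * ioc->base_add_sg_single() then stores flags_length together with a
 * 32- or 64-bit DMA address, depending on the variant selected by
 * _base_config_dma_addressing().
 */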
1492
1493 /**
1494  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
1495  * @ioc: per adapter object
1496  * @scmd: scsi command
1497  * @smid: system request message index
1498  * Context: none.
1499  *
1500  * The main routine that builds a scatter gather table from a given
1501  * scsi request sent via the .queuecommand main handler.
1502  *
1503  * Returns 0 success, anything else error
1504  */
1505 static int
1506 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1507         struct scsi_cmnd *scmd, u16 smid)
1508 {
1509         Mpi2SCSIIORequest_t *mpi_request;
1510         dma_addr_t chain_dma;
1511         struct scatterlist *sg_scmd;
1512         void *sg_local, *chain;
1513         u32 chain_offset;
1514         u32 chain_length;
1515         int sges_left;
1516         u32 sges_in_segment;
1517         u8 simple_sgl_flags;
1518         u8 simple_sgl_flags_last;
1519         u8 chain_sgl_flags;
1520         struct chain_tracker *chain_req;
1521
1522         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1523
1524         /* init scatter gather flags */
1525         simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1526             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1527         simple_sgl_flags_last = simple_sgl_flags |
1528             MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1529         chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1530             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1531
1532         sg_scmd = scsi_sglist(scmd);
1533         sges_left = scsi_dma_map(scmd);
1534         if (sges_left < 0) {
1535                 sdev_printk(KERN_ERR, scmd->device,
1536                         "scsi_dma_map failed: request for %d bytes!\n",
1537                         scsi_bufflen(scmd));
1538                 return -ENOMEM;
1539         }
1540
1541         sg_local = &mpi_request->SGL;
1542         sges_in_segment = (ioc->request_sz -
1543             offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1544         if (sges_left <= sges_in_segment)
1545                 goto fill_in_last_segment;
1546
1547         mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1548             (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1549
1550         /* fill in main message segment when there is a chain following */
1551         while (sges_in_segment > 1) {
1552                 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1553                     sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1554                 sg_scmd = sg_next(sg_scmd);
1555                 sg_local += ioc->sge_size_ieee;
1556                 sges_left--;
1557                 sges_in_segment--;
1558         }
1559
1560         /* initializing the pointers */
1561         chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1562         if (!chain_req)
1563                 return -1;
1564         chain = chain_req->chain_buffer;
1565         chain_dma = chain_req->chain_buffer_dma;
1566         do {
1567                 sges_in_segment = (sges_left <=
1568                     ioc->max_sges_in_chain_message) ? sges_left :
1569                     ioc->max_sges_in_chain_message;
1570                 chain_offset = (sges_left == sges_in_segment) ?
1571                     0 : sges_in_segment;
1572                 chain_length = sges_in_segment * ioc->sge_size_ieee;
1573                 if (chain_offset)
1574                         chain_length += ioc->sge_size_ieee;
1575                 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
1576                     chain_offset, chain_length, chain_dma);
1577
1578                 sg_local = chain;
1579                 if (!chain_offset)
1580                         goto fill_in_last_segment;
1581
1582                 /* fill in chain segments */
1583                 while (sges_in_segment) {
1584                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1585                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1586                         sg_scmd = sg_next(sg_scmd);
1587                         sg_local += ioc->sge_size_ieee;
1588                         sges_left--;
1589                         sges_in_segment--;
1590                 }
1591
1592                 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1593                 if (!chain_req)
1594                         return -1;
1595                 chain = chain_req->chain_buffer;
1596                 chain_dma = chain_req->chain_buffer_dma;
1597         } while (1);
1598
1599
1600  fill_in_last_segment:
1601
1602         /* fill the last segment */
1603         while (sges_left > 0) {
1604                 if (sges_left == 1)
1605                         _base_add_sg_single_ieee(sg_local,
1606                             simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
1607                             sg_dma_address(sg_scmd));
1608                 else
1609                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1610                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1611                 sg_scmd = sg_next(sg_scmd);
1612                 sg_local += ioc->sge_size_ieee;
1613                 sges_left--;
1614         }
1615
1616         return 0;
1617 }
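
/*
 * Editor's note: a worked example with hypothetical sizes (not taken from
 * the hardware headers).  In the IEEE format, ChainOffset and chain_offset
 * are counted in SGE units rather than 4-byte words.  Assuming
 * ioc->request_sz == 128, ioc->sge_size_ieee == 16 and
 * offsetof(Mpi2SCSIIORequest_t, SGL) == 96, the main frame holds
 * (128 - 96) / 16 = 2 SGEs; once more segments are mapped, the last slot
 * is reserved for the chain element, so ChainOffset = (2 - 1) + 96/16 = 7.
 * Within a chain buffer, chain_offset is simply the number of simple SGEs
 * preceding the next chain element, and chain_length grows by one extra
 * sge_size_ieee to cover that trailing chain element.
 */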
1618
1619 /**
1620  * _base_build_sg_ieee - build generic sg for IEEE format
1621  * @ioc: per adapter object
1622  * @psge: virtual address for SGE
1623  * @data_out_dma: physical address for WRITES
1624  * @data_out_sz: data xfer size for WRITES
1625  * @data_in_dma: physical address for READS
1626  * @data_in_sz: data xfer size for READS
1627  *
1628  * Return nothing.
1629  */
1630 static void
1631 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
1632         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1633         size_t data_in_sz)
1634 {
1635         u8 sgl_flags;
1636
1637         if (!data_out_sz && !data_in_sz) {
1638                 _base_build_zero_len_sge_ieee(ioc, psge);
1639                 return;
1640         }
1641
1642         if (data_out_sz && data_in_sz) {
1643                 /* WRITE sgel first */
1644                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1645                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1646                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1647                     data_out_dma);
1648
1649                 /* incr sgel */
1650                 psge += ioc->sge_size_ieee;
1651
1652                 /* READ sgel last */
1653                 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1654                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1655                     data_in_dma);
1656         } else if (data_out_sz) /* WRITE */ {
1657                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1658                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1659                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1660                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1661                     data_out_dma);
1662         } else if (data_in_sz) /* READ */ {
1663                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1664                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1665                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1666                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1667                     data_in_dma);
1668         }
1669 }
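
/*
 * Editor's note: a minimal usage sketch with hypothetical buffers and
 * sizes (error handling omitted).  Callers issuing an internal request
 * that carries both a write and a read payload point this helper at the
 * SGE location inside the message frame and let it emit the WRITE
 * element first and the READ element last:
 *
 *	void *psge = &mpi_request->SGL;
 *
 *	_base_build_sg_ieee(ioc, psge, data_out_dma, data_out_sz,
 *			    data_in_dma, data_in_sz);
 *
 * Passing zero for both sizes degenerates into a zero length SGE via
 * _base_build_zero_len_sge_ieee().
 */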
1670
1671 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1672
1673 /**
1674  * _base_config_dma_addressing - set dma addressing
1675  * @ioc: per adapter object
1676  * @pdev: PCI device struct
1677  *
1678  * Returns 0 for success, non-zero for failure.
1679  */
1680 static int
1681 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1682 {
1683         struct sysinfo s;
1684         u64 consistent_dma_mask;
1685
1686         if (ioc->dma_mask)
1687                 consistent_dma_mask = DMA_BIT_MASK(64);
1688         else
1689                 consistent_dma_mask = DMA_BIT_MASK(32);
1690
1691         if (sizeof(dma_addr_t) > 4) {
1692                 const uint64_t required_mask =
1693                     dma_get_required_mask(&pdev->dev);
1694                 if ((required_mask > DMA_BIT_MASK(32)) &&
1695                     !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1696                     !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
1697                         ioc->base_add_sg_single = &_base_add_sg_single_64;
1698                         ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1699                         ioc->dma_mask = 64;
1700                         goto out;
1701                 }
1702         }
1703
1704         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1705             && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1706                 ioc->base_add_sg_single = &_base_add_sg_single_32;
1707                 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1708                 ioc->dma_mask = 32;
1709         } else
1710                 return -ENODEV;
1711
1712  out:
1713         si_meminfo(&s);
1714         pr_info(MPT3SAS_FMT
1715                 "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1716                 ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
1717
1718         return 0;
1719 }
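
/*
 * Editor's note (illustrative arithmetic): convert_to_kb() turns a page
 * count into kB.  With the common PAGE_SHIFT of 12 it reduces to x << 2,
 * so 262144 pages are reported as 1048576 kB (1 GB).  The routine above
 * prefers a 64-bit streaming mask and only falls back to a 32-bit mask
 * (and 32-bit SGEs) when the 64-bit masks cannot be set.
 */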
1720
1721 static int
1722 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
1723                                       struct pci_dev *pdev)
1724 {
1725         if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1726                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1727                         return -ENODEV;
1728         }
1729         return 0;
1730 }
1731
1732 /**
1733  * _base_check_enable_msix - checks whether the controller is MSI-X capable.
1734  * @ioc: per adapter object
1735  *
1736  * Check to see if the card is capable of MSI-X, and set the number
1737  * of available msix vectors.
1738  */
1739 static int
1740 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1741 {
1742         int base;
1743         u16 message_control;
1744
1745         /* Check whether the controller is a SAS2008 B0 controller;
1746          * if it is, use IO-APIC instead of MSI-X
1747          */
1748         if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1749             ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
1750                 return -EINVAL;
1751         }
1752
1753         base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1754         if (!base) {
1755                 dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
1756                         ioc->name));
1757                 return -EINVAL;
1758         }
1759
1760         /* get msix vector count */
1761         /* NUMA_IO not supported for older controllers */
1762         if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1763             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1764             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1765             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1766             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1767             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1768             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1769                 ioc->msix_vector_count = 1;
1770         else {
1771                 pci_read_config_word(ioc->pdev, base + 2, &message_control);
1772                 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1773         }
1774         dinitprintk(ioc, pr_info(MPT3SAS_FMT
1775                 "msix is supported, vector_count(%d)\n",
1776                 ioc->name, ioc->msix_vector_count));
1777         return 0;
1778 }
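
/*
 * Editor's note (worked example, hypothetical register value): the MSI-X
 * Message Control word is read two bytes past the capability base, and
 * its table-size field encodes the number of vectors minus one.  The code
 * masks it with 0x3FF and adds 1, so reading message_control == 0x001F
 * yields (0x001F & 0x3FF) + 1 = 32 supported vectors.
 */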
1779
1780 /**
1781  * _base_free_irq - free irq
1782  * @ioc: per adapter object
1783  *
1784  * Frees the IRQ of each reply_queue and removes it from the list.
1785  */
1786 static void
1787 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
1788 {
1789         struct adapter_reply_queue *reply_q, *next;
1790
1791         if (list_empty(&ioc->reply_queue_list))
1792                 return;
1793
1794         list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1795                 list_del(&reply_q->list);
1796                 irq_set_affinity_hint(reply_q->vector, NULL);
1797                 free_cpumask_var(reply_q->affinity_hint);
1798                 synchronize_irq(reply_q->vector);
1799                 free_irq(reply_q->vector, reply_q);
1800                 kfree(reply_q);
1801         }
1802 }
1803
1804 /**
1805  * _base_request_irq - request irq
1806  * @ioc: per adapter object
1807  * @index: msix index into vector table
1808  * @vector: irq vector
1809  *
1810  * Allocates a reply_queue, requests its IRQ and inserts it into the list.
1811  */
1812 static int
1813 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
1814 {
1815         struct adapter_reply_queue *reply_q;
1816         int r;
1817
1818         reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1819         if (!reply_q) {
1820                 pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
1821                     ioc->name, (int)sizeof(struct adapter_reply_queue));
1822                 return -ENOMEM;
1823         }
1824         reply_q->ioc = ioc;
1825         reply_q->msix_index = index;
1826         reply_q->vector = vector;
1827
1828         if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
1829                 return -ENOMEM;
1830         cpumask_clear(reply_q->affinity_hint);
1831
1832         atomic_set(&reply_q->busy, 0);
1833         if (ioc->msix_enable)
1834                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1835                     driver_name, ioc->id, index);
1836         else
1837                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1838                     driver_name, ioc->id);
1839         r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1840             reply_q);
1841         if (r) {
1842                 pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
1843                     reply_q->name, vector);
1844                 kfree(reply_q);
1845                 return -EBUSY;
1846         }
1847
1848         INIT_LIST_HEAD(&reply_q->list);
1849         list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1850         return 0;
1851 }
1852
1853 /**
1854  * _base_assign_reply_queues - assign an msix index to each cpu
1855  * @ioc: per adapter object
1856  *
1857  * The end user would need to set the affinity via /proc/irq/#/smp_affinity
1858  *
1859  * It would be nice if we could call irq_set_affinity, however it is not
1860  * an exported symbol.
1861  */
1862 static void
1863 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1864 {
1865         unsigned int cpu, nr_cpus, nr_msix, index = 0;
1866         struct adapter_reply_queue *reply_q;
1867
1868         if (!_base_is_controller_msix_enabled(ioc))
1869                 return;
1870
1871         memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1872
1873         nr_cpus = num_online_cpus();
1874         nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
1875                                                ioc->facts.MaxMSIxVectors);
1876         if (!nr_msix)
1877                 return;
1878
1879         cpu = cpumask_first(cpu_online_mask);
1880
1881         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1882
1883                 unsigned int i, group = nr_cpus / nr_msix;
1884
1885                 if (cpu >= nr_cpus)
1886                         break;
1887
1888                 if (index < nr_cpus % nr_msix)
1889                         group++;
1890
1891                 for (i = 0 ; i < group ; i++) {
1892                         ioc->cpu_msix_table[cpu] = index;
1893                         cpumask_or(reply_q->affinity_hint,
1894                                    reply_q->affinity_hint, get_cpu_mask(cpu));
1895                         cpu = cpumask_next(cpu, cpu_online_mask);
1896                 }
1897
1898                 if (irq_set_affinity_hint(reply_q->vector,
1899                                            reply_q->affinity_hint))
1900                         dinitprintk(ioc, pr_info(MPT3SAS_FMT
1901                             "error setting affinity hint for irq vector %d\n",
1902                             ioc->name, reply_q->vector));
1903                 index++;
1904         }
1905 }
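
/*
 * Editor's note (worked example, hypothetical topology): with 8 online
 * CPUs and 3 MSI-X reply queues the loop above hands out groups of
 * 8 / 3 = 2 CPUs, enlarged by one for the first 8 % 3 = 2 queues:
 * queue 0 gets CPUs 0-2, queue 1 gets CPUs 3-5, queue 2 gets CPUs 6-7.
 * Each cpu_msix_table[] entry then maps a CPU straight to its reply
 * queue index, which _base_get_msix_index() uses at submission time.
 */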
1906
1907 /**
1908  * _base_disable_msix - disables msix
1909  * @ioc: per adapter object
1910  *
1911  */
1912 static void
1913 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1914 {
1915         if (!ioc->msix_enable)
1916                 return;
1917         pci_disable_msix(ioc->pdev);
1918         ioc->msix_enable = 0;
1919 }
1920
1921 /**
1922  * _base_enable_msix - enables msix, falls back to io_apic
1923  * @ioc: per adapter object
1924  *
1925  */
1926 static int
1927 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1928 {
1929         struct msix_entry *entries, *a;
1930         int r;
1931         int i;
1932         u8 try_msix = 0;
1933
1934         if (msix_disable == -1 || msix_disable == 0)
1935                 try_msix = 1;
1936
1937         if (!try_msix)
1938                 goto try_ioapic;
1939
1940         if (_base_check_enable_msix(ioc) != 0)
1941                 goto try_ioapic;
1942
1943         ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1944             ioc->msix_vector_count);
1945
1946         printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
1947           ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
1948           ioc->cpu_count, max_msix_vectors);
1949
1950         if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1951                 max_msix_vectors = 8;
1952
1953         if (max_msix_vectors > 0) {
1954                 ioc->reply_queue_count = min_t(int, max_msix_vectors,
1955                         ioc->reply_queue_count);
1956                 ioc->msix_vector_count = ioc->reply_queue_count;
1957         } else if (max_msix_vectors == 0)
1958                 goto try_ioapic;
1959
1960         entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1961             GFP_KERNEL);
1962         if (!entries) {
1963                 dfailprintk(ioc, pr_info(MPT3SAS_FMT
1964                         "kcalloc failed @ at %s:%d/%s() !!!\n",
1965                         ioc->name, __FILE__, __LINE__, __func__));
1966                 goto try_ioapic;
1967         }
1968
1969         for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1970                 a->entry = i;
1971
1972         r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
1973         if (r) {
1974                 dfailprintk(ioc, pr_info(MPT3SAS_FMT
1975                         "pci_enable_msix_exact failed (r=%d) !!!\n",
1976                         ioc->name, r));
1977                 kfree(entries);
1978                 goto try_ioapic;
1979         }
1980
1981         ioc->msix_enable = 1;
1982         for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1983                 r = _base_request_irq(ioc, i, a->vector);
1984                 if (r) {
1985                         _base_free_irq(ioc);
1986                         _base_disable_msix(ioc);
1987                         kfree(entries);
1988                         goto try_ioapic;
1989                 }
1990         }
1991
1992         kfree(entries);
1993         return 0;
1994
1995 /* fall back to io_apic interrupt routing */
1996  try_ioapic:
1997
1998         ioc->reply_queue_count = 1;
1999         r = _base_request_irq(ioc, 0, ioc->pdev->irq);
2000
2001         return r;
2002 }
2003
2004 /**
2005  * mpt3sas_base_unmap_resources - free controller resources
2006  * @ioc: per adapter object
2007  */
2008 void
2009 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2010 {
2011         struct pci_dev *pdev = ioc->pdev;
2012
2013         dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
2014                 ioc->name, __func__));
2015
2016         _base_free_irq(ioc);
2017         _base_disable_msix(ioc);
2018
2019         if (ioc->msix96_vector)
2020                 kfree(ioc->replyPostRegisterIndex);
2021
2022         if (ioc->chip_phys) {
2023                 iounmap(ioc->chip);
2024                 ioc->chip_phys = 0;
2025         }
2026
2027         if (pci_is_enabled(pdev)) {
2028                 pci_release_selected_regions(ioc->pdev, ioc->bars);
2029                 pci_disable_pcie_error_reporting(pdev);
2030                 pci_disable_device(pdev);
2031         }
2032 }
2033
2034 /**
2035  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
2036  * @ioc: per adapter object
2037  *
2038  * Returns 0 for success, non-zero for failure.
2039  */
2040 int
2041 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2042 {
2043         struct pci_dev *pdev = ioc->pdev;
2044         u32 memap_sz;
2045         u32 pio_sz;
2046         int i, r = 0;
2047         u64 pio_chip = 0;
2048         u64 chip_phys = 0;
2049         struct adapter_reply_queue *reply_q;
2050
2051         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
2052             ioc->name, __func__));
2053
2054         ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2055         if (pci_enable_device_mem(pdev)) {
2056                 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
2057                         ioc->name);
2058                 ioc->bars = 0;
2059                 return -ENODEV;
2060         }
2061
2062
2063         if (pci_request_selected_regions(pdev, ioc->bars,
2064             driver_name)) {
2065                 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
2066                         ioc->name);
2067                 ioc->bars = 0;
2068                 r = -ENODEV;
2069                 goto out_fail;
2070         }
2071
2072 /* AER (Advanced Error Reporting) hooks */
2073         pci_enable_pcie_error_reporting(pdev);
2074
2075         pci_set_master(pdev);
2076
2077
2078         if (_base_config_dma_addressing(ioc, pdev) != 0) {
2079                 pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
2080                     ioc->name, pci_name(pdev));
2081                 r = -ENODEV;
2082                 goto out_fail;
2083         }
2084
2085         for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2086              (!memap_sz || !pio_sz); i++) {
2087                 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2088                         if (pio_sz)
2089                                 continue;
2090                         pio_chip = (u64)pci_resource_start(pdev, i);
2091                         pio_sz = pci_resource_len(pdev, i);
2092                 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2093                         if (memap_sz)
2094                                 continue;
2095                         ioc->chip_phys = pci_resource_start(pdev, i);
2096                         chip_phys = (u64)ioc->chip_phys;
2097                         memap_sz = pci_resource_len(pdev, i);
2098                         ioc->chip = ioremap(ioc->chip_phys, memap_sz);
2099                 }
2100         }
2101
2102         if (ioc->chip == NULL) {
2103                 pr_err(MPT3SAS_FMT "unable to map adapter memory "
2104                         "or resource not found\n", ioc->name);
2105                 r = -EINVAL;
2106                 goto out_fail;
2107         }
2108
2109         _base_mask_interrupts(ioc);
2110
2111         r = _base_get_ioc_facts(ioc, CAN_SLEEP);
2112         if (r)
2113                 goto out_fail;
2114
2115         if (!ioc->rdpq_array_enable_assigned) {
2116                 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
2117                 ioc->rdpq_array_enable_assigned = 1;
2118         }
2119
2120         r = _base_enable_msix(ioc);
2121         if (r)
2122                 goto out_fail;
2123
2124         /* Use the Combined reply queue feature only for SAS3 C0 & higher
2125          * revision HBAs and also only when reply queue count is greater than 8
2126          */
2127         if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
2128                 /* Determine the Supplemental Reply Post Host Index Register
2129                  * addresses. The Supplemental Reply Post Host Index Registers
2130                  * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
2131                  * each subsequent register is at an offset of
2132                  * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the previous one.
2133                  */
2134                 ioc->replyPostRegisterIndex = kcalloc(
2135                      MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
2136                      sizeof(resource_size_t *), GFP_KERNEL);
2137                 if (!ioc->replyPostRegisterIndex) {
2138                         dfailprintk(ioc, printk(MPT3SAS_FMT
2139                         "allocation for reply Post Register Index failed!!!\n",
2140                                                                    ioc->name));
2141                         r = -ENOMEM;
2142                         goto out_fail;
2143                 }
2144
2145                 for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
2146                         ioc->replyPostRegisterIndex[i] = (resource_size_t *)
2147                              ((u8 *)&ioc->chip->Doorbell +
2148                              MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2149                              (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
2150                 }
2151         } else
2152                 ioc->msix96_vector = 0;
2153
2154         list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
2155                 pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
2156                     reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
2157                     "IO-APIC enabled"), reply_q->vector);
2158
2159         pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
2160             ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
2161         pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
2162             ioc->name, (unsigned long long)pio_chip, pio_sz);
2163
2164         /* Save PCI configuration state for recovery from PCI AER/EEH errors */
2165         pci_save_state(pdev);
2166         return 0;
2167
2168  out_fail:
2169         mpt3sas_base_unmap_resources(ioc);
2170         return r;
2171 }
2172
2173 /**
2174  * mpt3sas_base_get_msg_frame - obtain request mf pointer
2175  * @ioc: per adapter object
2176  * @smid: system request message index (smid zero is invalid)
2177  *
2178  * Returns virt pointer to message frame.
2179  */
2180 void *
2181 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2182 {
2183         return (void *)(ioc->request + (smid * ioc->request_sz));
2184 }
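
/*
 * Editor's note (illustrative arithmetic): message frames are laid out
 * back to back, so with a hypothetical ioc->request_sz of 128 the frame
 * for smid 5 starts at ioc->request + 5 * 128.  Smid zero is invalid, so
 * frame 0 is never handed out by the smid allocators below.
 */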
2185
2186 /**
2187  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
2188  * @ioc: per adapter object
2189  * @smid: system request message index
2190  *
2191  * Returns virt pointer to sense buffer.
2192  */
2193 void *
2194 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2195 {
2196         return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
2197 }
2198
2199 /**
2200  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
2201  * @ioc: per adapter object
2202  * @smid: system request message index
2203  *
2204  * Returns phys pointer to the low 32bit address of the sense buffer.
2205  */
2206 __le32
2207 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2208 {
2209         return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
2210             SCSI_SENSE_BUFFERSIZE));
2211 }
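
/*
 * Editor's note (illustrative arithmetic): sense buffers are packed at
 * SCSI_SENSE_BUFFERSIZE intervals and indexed from smid - 1 because smid
 * zero is invalid.  With the usual 96-byte buffer size, smid 3 resolves
 * to offset 2 * 96 = 192 from ioc->sense / ioc->sense_dma in both helpers
 * above; the DMA variant returns only the low 32 bits, matching the
 * 32-bit sense buffer address field in the SCSI IO request.
 */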
2212
2213 /**
2214  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
2215  * @ioc: per adapter object
2216  * @phys_addr: lower 32-bit physical addr of the reply
2217  *
2218  * Converts the lower 32-bit physical addr into a virt address.
2219  */
2220 void *
2221 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
2222 {
2223         if (!phys_addr)
2224                 return NULL;
2225         return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
2226 }
2227
2228 /**
2229  * mpt3sas_base_get_smid - obtain a free smid from internal queue
2230  * @ioc: per adapter object
2231  * @cb_idx: callback index
2232  *
2233  * Returns smid (zero is invalid)
2234  */
2235 u16
2236 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2237 {
2238         unsigned long flags;
2239         struct request_tracker *request;
2240         u16 smid;
2241
2242         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2243         if (list_empty(&ioc->internal_free_list)) {
2244                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2245                 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2246                     ioc->name, __func__);
2247                 return 0;
2248         }
2249
2250         request = list_entry(ioc->internal_free_list.next,
2251             struct request_tracker, tracker_list);
2252         request->cb_idx = cb_idx;
2253         smid = request->smid;
2254         list_del(&request->tracker_list);
2255         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2256         return smid;
2257 }
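
/*
 * Editor's note: a minimal submission sketch using the internal smid
 * pool (hypothetical callback index and request contents, error handling
 * omitted):
 *
 *	u16 smid = mpt3sas_base_get_smid(ioc, my_cb_idx);
 *	if (smid) {
 *		void *mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 *
 *		memset(mpi_request, 0, ioc->request_sz);
 *		... fill in the request ...
 *		mpt3sas_base_put_smid_default(ioc, smid);
 *	}
 *
 * The callback registered at my_cb_idx eventually runs, after which the
 * smid is returned to the pool with mpt3sas_base_free_smid().
 */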
2258
2259 /**
2260  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
2261  * @ioc: per adapter object
2262  * @cb_idx: callback index
2263  * @scmd: pointer to scsi command object
2264  *
2265  * Returns smid (zero is invalid)
2266  */
2267 u16
2268 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
2269         struct scsi_cmnd *scmd)
2270 {
2271         unsigned long flags;
2272         struct scsiio_tracker *request;
2273         u16 smid;
2274
2275         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2276         if (list_empty(&ioc->free_list)) {
2277                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2278                 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2279                     ioc->name, __func__);
2280                 return 0;
2281         }
2282
2283         request = list_entry(ioc->free_list.next,
2284             struct scsiio_tracker, tracker_list);
2285         request->scmd = scmd;
2286         request->cb_idx = cb_idx;
2287         smid = request->smid;
2288         list_del(&request->tracker_list);
2289         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2290         return smid;
2291 }
2292
2293 /**
2294  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
2295  * @ioc: per adapter object
2296  * @cb_idx: callback index
2297  *
2298  * Returns smid (zero is invalid)
2299  */
2300 u16
2301 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2302 {
2303         unsigned long flags;
2304         struct request_tracker *request;
2305         u16 smid;
2306
2307         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2308         if (list_empty(&ioc->hpr_free_list)) {
2309                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2310                 return 0;
2311         }
2312
2313         request = list_entry(ioc->hpr_free_list.next,
2314             struct request_tracker, tracker_list);
2315         request->cb_idx = cb_idx;
2316         smid = request->smid;
2317         list_del(&request->tracker_list);
2318         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2319         return smid;
2320 }
2321
2322 /**
2323  * mpt3sas_base_free_smid - put smid back on free_list
2324  * @ioc: per adapter object
2325  * @smid: system request message index
2326  *
2327  * Return nothing.
2328  */
2329 void
2330 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2331 {
2332         unsigned long flags;
2333         int i;
2334         struct chain_tracker *chain_req, *next;
2335
2336         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2337         if (smid < ioc->hi_priority_smid) {
2338                 /* scsiio queue */
2339                 i = smid - 1;
2340                 if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
2341                         list_for_each_entry_safe(chain_req, next,
2342                             &ioc->scsi_lookup[i].chain_list, tracker_list) {
2343                                 list_del_init(&chain_req->tracker_list);
2344                                 list_add(&chain_req->tracker_list,
2345                                     &ioc->free_chain_list);
2346                         }
2347                 }
2348                 ioc->scsi_lookup[i].cb_idx = 0xFF;
2349                 ioc->scsi_lookup[i].scmd = NULL;
2350                 ioc->scsi_lookup[i].direct_io = 0;
2351                 list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
2352                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2353
2354                 /*
2355                  * See _wait_for_commands_to_complete() call with regards
2356                  * to this code.
2357                  */
2358                 if (ioc->shost_recovery && ioc->pending_io_count) {
2359                         if (ioc->pending_io_count == 1)
2360                                 wake_up(&ioc->reset_wq);
2361                         ioc->pending_io_count--;
2362                 }
2363                 return;
2364         } else if (smid < ioc->internal_smid) {
2365                 /* hi-priority */
2366                 i = smid - ioc->hi_priority_smid;
2367                 ioc->hpr_lookup[i].cb_idx = 0xFF;
2368                 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
2369         } else if (smid <= ioc->hba_queue_depth) {
2370                 /* internal queue */
2371                 i = smid - ioc->internal_smid;
2372                 ioc->internal_lookup[i].cb_idx = 0xFF;
2373                 list_add(&ioc->internal_lookup[i].tracker_list,
2374                     &ioc->internal_free_list);
2375         }
2376         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2377 }
2378
2379 /**
2380  * _base_writeq - 64 bit write to MMIO
2381  * @ioc: per adapter object
2382  * @b: data payload
2383  * @addr: address in MMIO space
2384  * @writeq_lock: spin lock
2385  *
2386  * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
2387  * care of 32 bit environments where it is not guaranteed to send the entire word
2388  * in one transfer.
2389  */
2390 #if defined(writeq) && defined(CONFIG_64BIT)
2391 static inline void
2392 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2393 {
2394         writeq(cpu_to_le64(b), addr);
2395 }
2396 #else
2397 static inline void
2398 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2399 {
2400         unsigned long flags;
2401         __u64 data_out = cpu_to_le64(b);
2402
2403         spin_lock_irqsave(writeq_lock, flags);
2404         writel((u32)(data_out), addr);
2405         writel((u32)(data_out >> 32), (addr + 4));
2406         spin_unlock_irqrestore(writeq_lock, flags);
2407 }
2408 #endif
2409
2410 static inline u8
2411 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
2412 {
2413         return ioc->cpu_msix_table[raw_smp_processor_id()];
2414 }
2415
2416 /**
2417  * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
2418  * @ioc: per adapter object
2419  * @smid: system request message index
2420  * @handle: device handle
2421  *
2422  * Return nothing.
2423  */
2424 void
2425 mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2426 {
2427         Mpi2RequestDescriptorUnion_t descriptor;
2428         u64 *request = (u64 *)&descriptor;
2429
2430
2431         descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2432         descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
2433         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2434         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2435         descriptor.SCSIIO.LMID = 0;
2436         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2437             &ioc->scsi_lookup_lock);
2438 }
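
/*
 * Editor's note: a hedged sketch of how the scsih layer is expected to
 * pair the scsiio smid allocator with this post routine (hypothetical
 * names, no error handling):
 *
 *	u16 smid = mpt3sas_base_get_smid_scsiio(ioc, scsi_io_cb_idx, scmd);
 *	Mpi2SCSIIORequest_t *mpi_request =
 *				mpt3sas_base_get_msg_frame(ioc, smid);
 *
 *	... build the CDB, set SenseBufferLowAddress from
 *	    mpt3sas_base_get_sense_buffer_dma(), build the SGL ...
 *	mpt3sas_base_put_smid_scsi_io(ioc, smid, sas_device_handle);
 *
 * The request descriptor built above is posted to the IOC as a single
 * 64-bit write starting at RequestDescriptorPostLow via _base_writeq().
 */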
2439
2440 /**
2441  * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
2442  * @ioc: per adapter object
2443  * @smid: system request message index
2444  * @handle: device handle
2445  *
2446  * Return nothing.
2447  */
2448 void
2449 mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2450         u16 handle)
2451 {
2452         Mpi2RequestDescriptorUnion_t descriptor;
2453         u64 *request = (u64 *)&descriptor;
2454
2455         descriptor.SCSIIO.RequestFlags =
2456             MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2457         descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2458         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2459         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2460         descriptor.SCSIIO.LMID = 0;
2461         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2462             &ioc->scsi_lookup_lock);
2463 }
2464
2465 /**
2466  * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
2467  * @ioc: per adapter object
2468  * @smid: system request message index
2469  *
2470  * Return nothing.
2471  */
2472 void
2473 mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2474 {
2475         Mpi2RequestDescriptorUnion_t descriptor;
2476         u64 *request = (u64 *)&descriptor;
2477
2478         descriptor.HighPriority.RequestFlags =
2479             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2480         descriptor.HighPriority.MSIxIndex =  0;
2481         descriptor.HighPriority.SMID = cpu_to_le16(smid);
2482         descriptor.HighPriority.LMID = 0;
2483         descriptor.HighPriority.Reserved1 = 0;
2484         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2485             &ioc->scsi_lookup_lock);
2486 }
2487
2488 /**
2489  * mpt3sas_base_put_smid_default - Default, primarily used for config pages
2490  * @ioc: per adapter object
2491  * @smid: system request message index
2492  *
2493  * Return nothing.
2494  */
2495 void
2496 mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2497 {
2498         Mpi2RequestDescriptorUnion_t descriptor;
2499         u64 *request = (u64 *)&descriptor;
2500
2501         descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2502         descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
2503         descriptor.Default.SMID = cpu_to_le16(smid);
2504         descriptor.Default.LMID = 0;
2505         descriptor.Default.DescriptorTypeDependent = 0;
2506         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2507             &ioc->scsi_lookup_lock);
2508 }
2509
2510 /**
2511  * _base_display_intel_branding - Display branding string
2512  * @ioc: per adapter object
2513  *
2514  * Return nothing.
2515  */
2516 static void
2517 _base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc)
2518 {
2519         if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
2520                 return;
2521
2522         switch (ioc->pdev->device) {
2523         case MPI25_MFGPAGE_DEVID_SAS3008:
2524                 switch (ioc->pdev->subsystem_device) {
2525                 case MPT3SAS_INTEL_RMS3JC080_SSDID:
2526                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2527                                 MPT3SAS_INTEL_RMS3JC080_BRANDING);
2528                         break;
2529
2530                 case MPT3SAS_INTEL_RS3GC008_SSDID:
2531                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2532                                 MPT3SAS_INTEL_RS3GC008_BRANDING);
2533                         break;
2534                 case MPT3SAS_INTEL_RS3FC044_SSDID:
2535                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2536                                 MPT3SAS_INTEL_RS3FC044_BRANDING);
2537                         break;
2538                 case MPT3SAS_INTEL_RS3UC080_SSDID:
2539                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2540                                 MPT3SAS_INTEL_RS3UC080_BRANDING);
2541                         break;
2542                 default:
2543                         pr_info(MPT3SAS_FMT
2544                                 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2545                                 ioc->name, ioc->pdev->subsystem_device);
2546                         break;
2547                 }
2548                 break;
2549         default:
2550                 pr_info(MPT3SAS_FMT
2551                         "Intel(R) Controller: Subsystem ID: 0x%X\n",
2552                         ioc->name, ioc->pdev->subsystem_device);
2553                 break;
2554         }
2555 }
2556
2557
2558
2559 /**
2560  * _base_display_dell_branding - Display branding string
2561  * @ioc: per adapter object
2562  *
2563  * Return nothing.
2564  */
2565 static void
2566 _base_display_dell_branding(struct MPT3SAS_ADAPTER *ioc)
2567 {
2568         if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
2569                 return;
2570
2571         switch (ioc->pdev->device) {
2572         case MPI25_MFGPAGE_DEVID_SAS3008:
2573                 switch (ioc->pdev->subsystem_device) {
2574                 case MPT3SAS_DELL_12G_HBA_SSDID:
2575                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2576                                 MPT3SAS_DELL_12G_HBA_BRANDING);
2577                         break;
2578                 default:
2579                         pr_info(MPT3SAS_FMT
2580                            "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name,
2581                            ioc->pdev->subsystem_device);
2582                         break;
2583                 }
2584                 break;
2585         default:
2586                 pr_info(MPT3SAS_FMT
2587                         "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name,
2588                         ioc->pdev->subsystem_device);
2589                 break;
2590         }
2591 }
2592
2593 /**
2594  * _base_display_cisco_branding - Display branding string
2595  * @ioc: per adapter object
2596  *
2597  * Return nothing.
2598  */
2599 static void
2600 _base_display_cisco_branding(struct MPT3SAS_ADAPTER *ioc)
2601 {
2602         if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_CISCO)
2603                 return;
2604
2605         switch (ioc->pdev->device) {
2606         case MPI25_MFGPAGE_DEVID_SAS3008:
2607                 switch (ioc->pdev->subsystem_device) {
2608                 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
2609                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2610                                 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
2611                         break;
2612                 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
2613                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2614                                 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
2615                         break;
2616                 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2617                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2618                                 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2619                         break;
2620                 default:
2621                         pr_info(MPT3SAS_FMT
2622                           "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2623                           ioc->name, ioc->pdev->subsystem_device);
2624                         break;
2625                 }
2626                 break;
2627         case MPI25_MFGPAGE_DEVID_SAS3108_1:
2628                 switch (ioc->pdev->subsystem_device) {
2629                 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2630                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2631                         MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2632                         break;
2633                 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
2634                         pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2635                         MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
2636                         break;
2637                 default:
2638                         pr_info(MPT3SAS_FMT
2639                          "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2640                          ioc->name, ioc->pdev->subsystem_device);
2641                         break;
2642                 }
2643                 break;
2644         default:
2645                  pr_info(MPT3SAS_FMT
2646                         "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2647                         ioc->name, ioc->pdev->subsystem_device);
2648                 break;
2649         }
2650 }
2651
2652 /**
2653  * _base_display_ioc_capabilities - Display IOC's capabilities.
2654  * @ioc: per adapter object
2655  *
2656  * Return nothing.
2657  */
2658 static void
2659 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2660 {
2661         int i = 0;
2662         char desc[16];
2663         u32 iounit_pg1_flags;
2664         u32 bios_version;
2665
2666         bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2667         strncpy(desc, ioc->manu_pg0.ChipName, 16);
2668         pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2669            "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2670             ioc->name, desc,
2671            (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2672            (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2673            (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2674            ioc->facts.FWVersion.Word & 0x000000FF,
2675            ioc->pdev->revision,
2676            (bios_version & 0xFF000000) >> 24,
2677            (bios_version & 0x00FF0000) >> 16,
2678            (bios_version & 0x0000FF00) >> 8,
2679             bios_version & 0x000000FF);
2680
2681         _base_display_intel_branding(ioc);
2682         _base_display_dell_branding(ioc);
2683         _base_display_cisco_branding(ioc);
2684
2685         pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2686
2687         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2688                 pr_info("Initiator");
2689                 i++;
2690         }
2691
2692         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2693                 pr_info("%sTarget", i ? "," : "");
2694                 i++;
2695         }
2696
2697         i = 0;
2698         pr_info("), ");
2699         pr_info("Capabilities=(");
2700
2701         if (!ioc->hide_ir_msg) {
2702                 if (ioc->facts.IOCCapabilities &
2703                     MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2704                         pr_info("Raid");
2705                         i++;
2706                 }
2707         }
2708
2709         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2710                 pr_info("%sTLR", i ? "," : "");
2711                 i++;
2712         }
2713
2714         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2715                 pr_info("%sMulticast", i ? "," : "");
2716                 i++;
2717         }
2718
2719         if (ioc->facts.IOCCapabilities &
2720             MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2721                 pr_info("%sBIDI Target", i ? "," : "");
2722                 i++;
2723         }
2724
2725         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2726                 pr_info("%sEEDP", i ? "," : "");
2727                 i++;
2728         }
2729
2730         if (ioc->facts.IOCCapabilities &
2731             MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2732                 pr_info("%sSnapshot Buffer", i ? "," : "");
2733                 i++;
2734         }
2735
2736         if (ioc->facts.IOCCapabilities &
2737             MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2738                 pr_info("%sDiag Trace Buffer", i ? "," : "");
2739                 i++;
2740         }
2741
2742         if (ioc->facts.IOCCapabilities &
2743             MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2744                 pr_info("%sDiag Extended Buffer", i ? "," : "");
2745                 i++;
2746         }
2747
2748         if (ioc->facts.IOCCapabilities &
2749             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2750                 pr_info("%sTask Set Full", i ? "," : "");
2751                 i++;
2752         }
2753
2754         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2755         if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2756                 pr_info("%sNCQ", i ? "," : "");
2757                 i++;
2758         }
2759
2760         pr_info(")\n");
2761 }
2762
2763 /**
2764  * mpt3sas_base_update_missing_delay - change the missing delay timers
2765  * @ioc: per adapter object
2766  * @device_missing_delay: amount of time till device is reported missing
2767  * @io_missing_delay: interval IO is returned when there is a missing device
2768  *
2769  * Return nothing.
2770  *
2771  * Using the values passed on the command line, this function modifies the
2772  * device missing delay as well as the io missing delay. It should be called
2773  * at driver load time.
2774  */
2775 void
2776 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
2777         u16 device_missing_delay, u8 io_missing_delay)
2778 {
2779         u16 dmd, dmd_new, dmd_orignal;
2780         u8 io_missing_delay_original;
2781         u16 sz;
2782         Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2783         Mpi2ConfigReply_t mpi_reply;
2784         u8 num_phys = 0;
2785         u16 ioc_status;
2786
2787         mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
2788         if (!num_phys)
2789                 return;
2790
2791         sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2792             sizeof(Mpi2SasIOUnit1PhyData_t));
2793         sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2794         if (!sas_iounit_pg1) {
2795                 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2796                     ioc->name, __FILE__, __LINE__, __func__);
2797                 goto out;
2798         }
2799         if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2800             sas_iounit_pg1, sz))) {
2801                 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2802                     ioc->name, __FILE__, __LINE__, __func__);
2803                 goto out;
2804         }
2805         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2806             MPI2_IOCSTATUS_MASK;
2807         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2808                 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2809                     ioc->name, __FILE__, __LINE__, __func__);
2810                 goto out;
2811         }
2812
2813         /* device missing delay */
2814         dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2815         if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2816                 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2817         else
2818                 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2819         dmd_orignal = dmd;
2820         if (device_missing_delay > 0x7F) {
2821                 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2822                     device_missing_delay;
2823                 dmd = dmd / 16;
2824                 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2825         } else
2826                 dmd = device_missing_delay;
2827         sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2828
2829         /* io missing delay */
2830         io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2831         sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2832
2833         if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2834             sz)) {
2835                 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2836                         dmd_new = (dmd &
2837                             MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2838                 else
2839                         dmd_new =
2840                     dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2841                 pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
2842                         ioc->name, dmd_orignal, dmd_new);
2843                 pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
2844                         ioc->name, io_missing_delay_original,
2845                     io_missing_delay);
2846                 ioc->device_missing_delay = dmd_new;
2847                 ioc->io_missing_delay = io_missing_delay;
2848         }
2849
2850 out:
2851         kfree(sas_iounit_pg1);
2852 }
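
/*
 * Editor's note (worked example, hypothetical delay value): device
 * missing delays above 0x7F cannot be stored directly, so the code above
 * clamps to 0x7F0, divides by 16 and sets the UNIT_16 flag.  A requested
 * delay of 200 is stored as 200 / 16 = 12 with
 * MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set, so the effective value read
 * back is 12 * 16 = 192, which is what the "new(%d)" log line reports.
 */
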
2853 /**
2854  * _base_static_config_pages - static start of day config pages
2855  * @ioc: per adapter object
2856  *
2857  * Return nothing.
2858  */
2859 static void
2860 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
2861 {
2862         Mpi2ConfigReply_t mpi_reply;
2863         u32 iounit_pg1_flags;
2864
2865         mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2866         if (ioc->ir_firmware)
2867                 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2868                     &ioc->manu_pg10);
2869
2870         /*
2871          * Ensure correct T10 PI operation if vendor left EEDPTagMode
2872          * flag unset in NVDATA.
2873          */
2874         mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
2875         if (ioc->manu_pg11.EEDPTagMode == 0) {
2876                 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
2877                     ioc->name);
2878                 ioc->manu_pg11.EEDPTagMode &= ~0x3;
2879                 ioc->manu_pg11.EEDPTagMode |= 0x1;
2880                 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
2881                     &ioc->manu_pg11);
2882         }
2883
2884         mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2885         mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2886         mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2887         mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2888         mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2889         mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
2890         _base_display_ioc_capabilities(ioc);
2891
2892         /*
2893          * Enable task_set_full handling in iounit_pg1 when the
2894          * facts capabilities indicate that it's supported.
2895          */
2896         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2897         if ((ioc->facts.IOCCapabilities &
2898             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2899                 iounit_pg1_flags &=
2900                     ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2901         else
2902                 iounit_pg1_flags |=
2903                     MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2904         ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2905         mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2906
2907         if (ioc->iounit_pg8.NumSensors)
2908                 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
2909 }
2910
2911 /**
2912  * _base_release_memory_pools - release memory
2913  * @ioc: per adapter object
2914  *
2915  * Free memory allocated from _base_allocate_memory_pools.
2916  *
2917  * Return nothing.
2918  */
2919 static void
2920 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2921 {
2922         int i = 0;
2923         struct reply_post_struct *rps;
2924
2925         dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2926             __func__));
2927
2928         if (ioc->request) {
2929                 pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2930                     ioc->request,  ioc->request_dma);
2931                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2932                         "request_pool(0x%p): free\n",
2933                         ioc->name, ioc->request));
2934                 ioc->request = NULL;
2935         }
2936
2937         if (ioc->sense) {
2938                 pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2939                 if (ioc->sense_dma_pool)
2940                         pci_pool_destroy(ioc->sense_dma_pool);
2941                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2942                         "sense_pool(0x%p): free\n",
2943                         ioc->name, ioc->sense));
2944                 ioc->sense = NULL;
2945         }
2946
2947         if (ioc->reply) {
2948                 pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2949                 if (ioc->reply_dma_pool)
2950                         pci_pool_destroy(ioc->reply_dma_pool);
2951                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2952                         "reply_pool(0x%p): free\n",
2953                         ioc->name, ioc->reply));
2954                 ioc->reply = NULL;
2955         }
2956
2957         if (ioc->reply_free) {
2958                 pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2959                     ioc->reply_free_dma);
2960                 if (ioc->reply_free_dma_pool)
2961                         pci_pool_destroy(ioc->reply_free_dma_pool);
2962                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2963                         "reply_free_pool(0x%p): free\n",
2964                         ioc->name, ioc->reply_free));
2965                 ioc->reply_free = NULL;
2966         }
2967
2968         if (ioc->reply_post) {
2969                 do {
2970                         rps = &ioc->reply_post[i];
2971                         if (rps->reply_post_free) {
2972                                 pci_pool_free(
2973                                     ioc->reply_post_free_dma_pool,
2974                                     rps->reply_post_free,
2975                                     rps->reply_post_free_dma);
2976                                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2977                                     "reply_post_free_pool(0x%p): free\n",
2978                                     ioc->name, rps->reply_post_free));
2979                                 rps->reply_post_free = NULL;
2980                         }
2981                 } while (ioc->rdpq_array_enable &&
2982                            (++i < ioc->reply_queue_count));
2983
2984                 if (ioc->reply_post_free_dma_pool)
2985                         pci_pool_destroy(ioc->reply_post_free_dma_pool);
2986                 kfree(ioc->reply_post);
2987         }
2988
2989         if (ioc->config_page) {
2990                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2991                     "config_page(0x%p): free\n", ioc->name,
2992                     ioc->config_page));
2993                 pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2994                     ioc->config_page, ioc->config_page_dma);
2995         }
2996
2997         if (ioc->scsi_lookup) {
2998                 free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2999                 ioc->scsi_lookup = NULL;
3000         }
3001         kfree(ioc->hpr_lookup);
3002         kfree(ioc->internal_lookup);
3003         if (ioc->chain_lookup) {
3004                 for (i = 0; i < ioc->chain_depth; i++) {
3005                         if (ioc->chain_lookup[i].chain_buffer)
3006                                 pci_pool_free(ioc->chain_dma_pool,
3007                                     ioc->chain_lookup[i].chain_buffer,
3008                                     ioc->chain_lookup[i].chain_buffer_dma);
3009                 }
3010                 if (ioc->chain_dma_pool)
3011                         pci_pool_destroy(ioc->chain_dma_pool);
3012                 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
3013                 ioc->chain_lookup = NULL;
3014         }
3015 }
3016
3017 /**
3018  * _base_allocate_memory_pools - allocate start of day memory pools
3019  * @ioc: per adapter object
3020  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3021  *
3022  * Returns 0 for success, non-zero for failure.
3023  */
3024 static int
3025 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
3026 {
3027         struct mpt3sas_facts *facts;
3028         u16 max_sge_elements;
3029         u16 chains_needed_per_io;
3030         u32 sz, total_sz, reply_post_free_sz;
3031         u32 retry_sz;
3032         u16 max_request_credit;
3033         unsigned short sg_tablesize;
3034         u16 sge_size;
3035         int i;
3036
3037         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3038             __func__));
3039
3040
3041         retry_sz = 0;
3042         facts = &ioc->facts;
3043
3044         /* command line tunables for max sgl entries */
3045         if (max_sgl_entries != -1)
3046                 sg_tablesize = max_sgl_entries;
3047         else {
3048                 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
3049                         sg_tablesize = MPT2SAS_SG_DEPTH;
3050                 else
3051                         sg_tablesize = MPT3SAS_SG_DEPTH;
3052         }
3053
3054         if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
3055                 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
3056         else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
3057                 sg_tablesize = min_t(unsigned short, sg_tablesize,
3058                                       SCSI_MAX_SG_CHAIN_SEGMENTS);
3059                 pr_warn(MPT3SAS_FMT
3060                  "sg_tablesize(%u) is bigger than kernel"
3061                  " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
3062                  sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
3063         }
3064         ioc->shost->sg_tablesize = sg_tablesize;
3065
3066         ioc->hi_priority_depth = facts->HighPriorityCredit;
3067         ioc->internal_depth = ioc->hi_priority_depth + (5);
3068         /* command line tunables for max controller queue depth */
3069         if (max_queue_depth != -1 && max_queue_depth != 0) {
3070                 max_request_credit = min_t(u16, max_queue_depth +
3071                     ioc->hi_priority_depth + ioc->internal_depth,
3072                     facts->RequestCredit);
3073                 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
3074                         max_request_credit =  MAX_HBA_QUEUE_DEPTH;
3075         } else
3076                 max_request_credit = min_t(u16, facts->RequestCredit,
3077                     MAX_HBA_QUEUE_DEPTH);
3078
3079         ioc->hba_queue_depth = max_request_credit;
3080
3081         /* request frame size */
3082         ioc->request_sz = facts->IOCRequestFrameSize * 4;
3083
3084         /* reply frame size */
3085         ioc->reply_sz = facts->ReplyFrameSize * 4;
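        /*
         * IOCFacts reports both frame sizes in units of 32-bit dwords;
         * the multiplications by 4 above convert them to bytes (e.g. an
         * IOCRequestFrameSize of 32 dwords is a 128 byte request frame).
         */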
3086
3087         /* calculate the max scatter element size */
3088         sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
3089
3090  retry_allocation:
3091         total_sz = 0;
3092         /* calculate number of sg elements left over in the 1st frame */
3093         max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
3094             sizeof(Mpi2SGEIOUnion_t)) + sge_size);
3095         ioc->max_sges_in_main_message = max_sge_elements/sge_size;
3096
3097         /* now do the same for a chain buffer */
3098         max_sge_elements = ioc->request_sz - sge_size;
3099         ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
3100
3101         /*
3102          *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
3103          */
3104         chains_needed_per_io = ((ioc->shost->sg_tablesize -
3105            ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
3106             + 1;
3107         if (chains_needed_per_io > facts->MaxChainDepth) {
3108                 chains_needed_per_io = facts->MaxChainDepth;
3109                 ioc->shost->sg_tablesize = min_t(u16,
3110                 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
3111                 * chains_needed_per_io), ioc->shost->sg_tablesize);
3112         }
3113         ioc->chains_needed_per_io = chains_needed_per_io;
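        /*
         * In both calculations above one SGE-sized slot is reserved for the
         * chain element that links to the next frame: the main message also
         * loses the fixed SCSI IO header, while a chain frame loses only
         * that single chain slot.  chains_needed_per_io is then the number
         * of chain frames needed to describe a worst-case sg_tablesize
         * scatter-gather list.
         */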
3114
3115         /* reply free queue sizing - taking into account 64 FW events */
3116         ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3117
3118         /* calculate reply descriptor post queue depth */
3119         ioc->reply_post_queue_depth = ioc->hba_queue_depth +
3120                                 ioc->reply_free_queue_depth + 1;
3121         /* align the reply post queue on the next 16 count boundary */
3122         if (ioc->reply_post_queue_depth % 16)
3123                 ioc->reply_post_queue_depth += 16 -
3124                 (ioc->reply_post_queue_depth % 16);
3125
3126
3127         if (ioc->reply_post_queue_depth >
3128             facts->MaxReplyDescriptorPostQueueDepth) {
3129                 ioc->reply_post_queue_depth =
3130                                 facts->MaxReplyDescriptorPostQueueDepth -
3131                     (facts->MaxReplyDescriptorPostQueueDepth % 16);
3132                 ioc->hba_queue_depth =
3133                                 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
3134                 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3135         }
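        /*
         * Illustrative sizing: with hba_queue_depth = 1000 the reply free
         * queue holds 1064 entries and the reply post queue needs
         * 1000 + 1064 + 1 = 2065 descriptors, rounded up to 2080 (the next
         * multiple of 16).  If that exceeds the firmware limit, the clamp
         * above inverts the same formula to derive a smaller
         * hba_queue_depth.
         */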
3136
3137         dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
3138             "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
3139             "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
3140             ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
3141             ioc->chains_needed_per_io));
3142
3143         /* reply post queue, 16 byte align */
3144         reply_post_free_sz = ioc->reply_post_queue_depth *
3145             sizeof(Mpi2DefaultReplyDescriptor_t);
3146
3147         sz = reply_post_free_sz;
3148         if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
3149                 sz *= ioc->reply_queue_count;
3150
3151         ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
3152             (ioc->reply_queue_count):1,
3153             sizeof(struct reply_post_struct), GFP_KERNEL);
3154
3155         if (!ioc->reply_post) {
3156                 pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
3157                         ioc->name);
3158                 goto out;
3159         }
3160         ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
3161             ioc->pdev, sz, 16, 0);
3162         if (!ioc->reply_post_free_dma_pool) {
3163                 pr_err(MPT3SAS_FMT
3164                  "reply_post_free pool: pci_pool_create failed\n",
3165                  ioc->name);
3166                 goto out;
3167         }
3168         i = 0;
3169         do {
3170                 ioc->reply_post[i].reply_post_free =
3171                     pci_pool_alloc(ioc->reply_post_free_dma_pool,
3172                     GFP_KERNEL,
3173                     &ioc->reply_post[i].reply_post_free_dma);
3174                 if (!ioc->reply_post[i].reply_post_free) {
3175                         pr_err(MPT3SAS_FMT
3176                         "reply_post_free pool: pci_pool_alloc failed\n",
3177                         ioc->name);
3178                         goto out;
3179                 }
3180                 memset(ioc->reply_post[i].reply_post_free, 0, sz);
3181                 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3182                     "reply post free pool (0x%p): depth(%d), "
3183                     "element_size(%d), pool_size(%d kB)\n", ioc->name,
3184                     ioc->reply_post[i].reply_post_free,
3185                     ioc->reply_post_queue_depth, 8, sz/1024));
3186                 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3187                     "reply_post_free_dma = (0x%llx)\n", ioc->name,
3188                     (unsigned long long)
3189                     ioc->reply_post[i].reply_post_free_dma));
3190                 total_sz += sz;
3191         } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
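        /*
         * With RDPQ enabled each reply queue gets its own pool element and
         * the loop above runs reply_queue_count times; otherwise it runs
         * once and a single contiguous allocation (sz was already
         * multiplied by reply_queue_count) backs all MSI-X reply queues.
         */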
3192
3193         if (ioc->dma_mask == 64) {
3194                 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
3195                         pr_warn(MPT3SAS_FMT
3196                             "no suitable consistent DMA mask for %s\n",
3197                             ioc->name, pci_name(ioc->pdev));
3198                         goto out;
3199                 }
3200         }
3201
3202         ioc->scsiio_depth = ioc->hba_queue_depth -
3203             ioc->hi_priority_depth - ioc->internal_depth;
3204
3205         /* set the scsi host can_queue depth, leaving room for internal
3206          * commands that could still be outstanding
3207          */
3208         ioc->shost->can_queue = ioc->scsiio_depth;
3209         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3210                 "scsi host: can_queue depth (%d)\n",
3211                 ioc->name, ioc->shost->can_queue));
3212
3213
3214         /* contiguous pool for request and chains, 16 byte align, one extra
3215          * frame for smid=0
3216          */
3217         ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
3218         sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
3219
3220         /* hi-priority queue */
3221         sz += (ioc->hi_priority_depth * ioc->request_sz);
3222
3223         /* internal queue */
3224         sz += (ioc->internal_depth * ioc->request_sz);
3225
3226         ioc->request_dma_sz = sz;
3227         ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
3228         if (!ioc->request) {
3229                 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3230                     "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3231                     "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
3232                     ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3233                 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
3234                         goto out;
3235                 retry_sz += 64;
3236                 ioc->hba_queue_depth = max_request_credit - retry_sz;
3237                 goto retry_allocation;
3238         }
3239
3240         if (retry_sz)
3241                 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3242                     "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3243                     "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
3244                     ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3245
3246         /* hi-priority queue */
3247         ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
3248             ioc->request_sz);
3249         ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
3250             ioc->request_sz);
3251
3252         /* internal queue */
3253         ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
3254             ioc->request_sz);
3255         ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
3256             ioc->request_sz);
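        /*
         * Layout of the single contiguous request region, one request_sz
         * frame per slot:
         *
         *   [ smid 0 (reserved) | scsiio_depth SCSI IO frames |
         *     hi_priority_depth frames | internal_depth frames ]
         *
         * hi_priority and internal above are just offsets into that region,
         * for both the virtual and the DMA addresses.
         */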
3257
3258         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3259                 "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3260                 ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
3261             (ioc->hba_queue_depth * ioc->request_sz)/1024));
3262
3263         dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
3264             ioc->name, (unsigned long long) ioc->request_dma));
3265         total_sz += sz;
3266
3267         sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
3268         ioc->scsi_lookup_pages = get_order(sz);
3269         ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
3270             GFP_KERNEL, ioc->scsi_lookup_pages);
3271         if (!ioc->scsi_lookup) {
3272                 pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
3273                         ioc->name, (int)sz);
3274                 goto out;
3275         }
3276
3277         dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
3278                 ioc->name, ioc->request, ioc->scsiio_depth));
3279
3280         ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
3281         sz = ioc->chain_depth * sizeof(struct chain_tracker);
3282         ioc->chain_pages = get_order(sz);
3283         ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
3284             GFP_KERNEL, ioc->chain_pages);
3285         if (!ioc->chain_lookup) {
3286                 pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
3287                         ioc->name);
3288                 goto out;
3289         }
3290         ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
3291             ioc->request_sz, 16, 0);
3292         if (!ioc->chain_dma_pool) {
3293                 pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
3294                         ioc->name);
3295                 goto out;
3296         }
3297         for (i = 0; i < ioc->chain_depth; i++) {
3298                 ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
3299                     ioc->chain_dma_pool , GFP_KERNEL,
3300                     &ioc->chain_lookup[i].chain_buffer_dma);
3301                 if (!ioc->chain_lookup[i].chain_buffer) {
3302                         ioc->chain_depth = i;
3303                         goto chain_done;
3304                 }
3305                 total_sz += ioc->request_sz;
3306         }
3307  chain_done:
3308         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3309                 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
3310                 ioc->name, ioc->chain_depth, ioc->request_sz,
3311                 ((ioc->chain_depth *  ioc->request_sz))/1024));
3312
3313         /* initialize hi-priority queue smid's */
3314         ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
3315             sizeof(struct request_tracker), GFP_KERNEL);
3316         if (!ioc->hpr_lookup) {
3317                 pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
3318                     ioc->name);
3319                 goto out;
3320         }
3321         ioc->hi_priority_smid = ioc->scsiio_depth + 1;
3322         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3323                 "hi_priority(0x%p): depth(%d), start smid(%d)\n",
3324                 ioc->name, ioc->hi_priority,
3325             ioc->hi_priority_depth, ioc->hi_priority_smid));
3326
3327         /* initialize internal queue smid's */
3328         ioc->internal_lookup = kcalloc(ioc->internal_depth,
3329             sizeof(struct request_tracker), GFP_KERNEL);
3330         if (!ioc->internal_lookup) {
3331                 pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
3332                     ioc->name);
3333                 goto out;
3334         }
3335         ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
3336         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3337                 "internal(0x%p): depth(%d), start smid(%d)\n",
3338                 ioc->name, ioc->internal,
3339             ioc->internal_depth, ioc->internal_smid));
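        /*
         * Resulting smid ranges: 1..scsiio_depth for SCSI IO requests,
         * hi_priority_smid..(internal_smid - 1) for hi-priority requests,
         * internal_smid onwards for internal requests; smid 0 is never
         * handed out.
         */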
3340
3341         /* sense buffers, 4 byte align */
3342         sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
3343         ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
3344             0);
3345         if (!ioc->sense_dma_pool) {
3346                 pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
3347                     ioc->name);
3348                 goto out;
3349         }
3350         ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
3351             &ioc->sense_dma);
3352         if (!ioc->sense) {
3353                 pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
3354                     ioc->name);
3355                 goto out;
3356         }
3357         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3358             "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
3359             "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
3360             SCSI_SENSE_BUFFERSIZE, sz/1024));
3361         dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
3362             ioc->name, (unsigned long long)ioc->sense_dma));
3363         total_sz += sz;
3364
3365         /* reply pool, 4 byte align */
3366         sz = ioc->reply_free_queue_depth * ioc->reply_sz;
3367         ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
3368             0);
3369         if (!ioc->reply_dma_pool) {
3370                 pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
3371                     ioc->name);
3372                 goto out;
3373         }
3374         ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
3375             &ioc->reply_dma);
3376         if (!ioc->reply) {
3377                 pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
3378                     ioc->name);
3379                 goto out;
3380         }
3381         ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
3382         ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
3383         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3384                 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3385                 ioc->name, ioc->reply,
3386             ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
3387         dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
3388             ioc->name, (unsigned long long)ioc->reply_dma));
3389         total_sz += sz;
3390
3391         /* reply free queue, 16 byte align */
3392         sz = ioc->reply_free_queue_depth * 4;
3393         ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
3394             ioc->pdev, sz, 16, 0);
3395         if (!ioc->reply_free_dma_pool) {
3396                 pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
3397                         ioc->name);
3398                 goto out;
3399         }
3400         ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
3401             &ioc->reply_free_dma);
3402         if (!ioc->reply_free) {
3403                 pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
3404                         ioc->name);
3405                 goto out;
3406         }
3407         memset(ioc->reply_free, 0, sz);
3408         dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
3409             "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
3410             ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
3411         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3412                 "reply_free_dma (0x%llx)\n",
3413                 ioc->name, (unsigned long long)ioc->reply_free_dma));
3414         total_sz += sz;
3415
3416         ioc->config_page_sz = 512;
3417         ioc->config_page = pci_alloc_consistent(ioc->pdev,
3418             ioc->config_page_sz, &ioc->config_page_dma);
3419         if (!ioc->config_page) {
3420                 pr_err(MPT3SAS_FMT
3421                         "config page: pci_alloc_consistent failed\n",
3422                         ioc->name);
3423                 goto out;
3424         }
3425         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3426                 "config page(0x%p): size(%d)\n",
3427                 ioc->name, ioc->config_page, ioc->config_page_sz));
3428         dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
3429                 ioc->name, (unsigned long long)ioc->config_page_dma));
3430         total_sz += ioc->config_page_sz;
3431
3432         pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
3433             ioc->name, total_sz/1024);
3434         pr_info(MPT3SAS_FMT
3435                 "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
3436             ioc->name, ioc->shost->can_queue, facts->RequestCredit);
3437         pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
3438             ioc->name, ioc->shost->sg_tablesize);
3439         return 0;
3440
3441  out:
3442         return -ENOMEM;
3443 }
3444
3445 /**
3446  * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
3447  * @ioc: Pointer to MPT_ADAPTER structure
3448  * @cooked: Request raw or cooked IOC state
3449  *
3450  * Returns all IOC Doorbell register bits if cooked==0, else just the
3451  * Doorbell bits in MPI_IOC_STATE_MASK.
3452  */
3453 u32
3454 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
3455 {
3456         u32 s, sc;
3457
3458         s = readl(&ioc->chip->Doorbell);
3459         sc = s & MPI2_IOC_STATE_MASK;
3460         return cooked ? sc : s;
3461 }
3462
3463 /**
3464  * _base_wait_on_iocstate - waiting on a particular ioc state
3465  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
3466  * @timeout: timeout in seconds
3467  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3468  *
3469  * Returns 0 for success, non-zero for failure.
3470  */
3471 static int
3472 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
3473         int sleep_flag)
3474 {
3475         u32 count, cntdn;
3476         u32 current_state;
3477
3478         count = 0;
3479         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
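        /*
         * The countdown above sizes the polling loop to run for roughly
         * @timeout seconds in either mode: 1000 iterations of a ~1 ms sleep
         * when sleeping is allowed, or 2000 iterations of a 500 us
         * busy-wait otherwise.
         */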
3480         do {
3481                 current_state = mpt3sas_base_get_iocstate(ioc, 1);
3482                 if (current_state == ioc_state)
3483                         return 0;
3484                 if (count && current_state == MPI2_IOC_STATE_FAULT)
3485                         break;
3486                 if (sleep_flag == CAN_SLEEP)
3487                         usleep_range(1000, 1500);
3488                 else
3489                         udelay(500);
3490                 count++;
3491         } while (--cntdn);
3492
3493         return current_state;
3494 }
3495
3496 /**
3497  * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
3498  * a write to the doorbell)
3499  * @ioc: per adapter object
3500  * @timeout: timeout in seconds
3501  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3502  *
3503  * Returns 0 for success, non-zero for failure.
3504  *
3505  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3506  */
3507 static int
3508 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
3509
3510 static int
3511 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3512         int sleep_flag)
3513 {
3514         u32 cntdn, count;
3515         u32 int_status;
3516
3517         count = 0;
3518         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3519         do {
3520                 int_status = readl(&ioc->chip->HostInterruptStatus);
3521                 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3522                         dhsprintk(ioc, pr_info(MPT3SAS_FMT
3523                                 "%s: successful count(%d), timeout(%d)\n",
3524                                 ioc->name, __func__, count, timeout));
3525                         return 0;
3526                 }
3527                 if (sleep_flag == CAN_SLEEP)
3528                         usleep_range(1000, 1500);
3529                 else
3530                         udelay(500);
3531                 count++;
3532         } while (--cntdn);
3533
3534         pr_err(MPT3SAS_FMT
3535                 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3536                 ioc->name, __func__, count, int_status);
3537         return -EFAULT;
3538 }
3539
3540 /**
3541  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3542  * @ioc: per adapter object
3543  * @timeout: timeout in seconds
3544  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3545  *
3546  * Returns 0 for success, non-zero for failure.
3547  *
3548  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3549  * doorbell.
3550  */
3551 static int
3552 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
3553         int sleep_flag)
3554 {
3555         u32 cntdn, count;
3556         u32 int_status;
3557         u32 doorbell;
3558
3559         count = 0;
3560         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3561         do {
3562                 int_status = readl(&ioc->chip->HostInterruptStatus);
3563                 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3564                         dhsprintk(ioc, pr_info(MPT3SAS_FMT
3565                                 "%s: successful count(%d), timeout(%d)\n",
3566                                 ioc->name, __func__, count, timeout));
3567                         return 0;
3568                 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3569                         doorbell = readl(&ioc->chip->Doorbell);
3570                         if ((doorbell & MPI2_IOC_STATE_MASK) ==
3571                             MPI2_IOC_STATE_FAULT) {
3572                                 mpt3sas_base_fault_info(ioc , doorbell);
3573                                 return -EFAULT;
3574                         }
3575                 } else if (int_status == 0xFFFFFFFF)
3576                         goto out;
3577
3578                 if (sleep_flag == CAN_SLEEP)
3579                         usleep_range(1000, 1500);
3580                 else
3581                         udelay(500);
3582                 count++;
3583         } while (--cntdn);
3584
3585  out:
3586         pr_err(MPT3SAS_FMT
3587          "%s: failed due to timeout count(%d), int_status(%x)!\n",
3588          ioc->name, __func__, count, int_status);
3589         return -EFAULT;
3590 }
3591
3592 /**
3593  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3594  * @ioc: per adapter object
3595  * @timeout: timeout in seconds
3596  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3597  *
3598  * Returns 0 for success, non-zero for failure.
3599  *
3600  */
3601 static int
3602 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3603         int sleep_flag)
3604 {
3605         u32 cntdn, count;
3606         u32 doorbell_reg;
3607
3608         count = 0;
3609         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3610         do {
3611                 doorbell_reg = readl(&ioc->chip->Doorbell);
3612                 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3613                         dhsprintk(ioc, pr_info(MPT3SAS_FMT
3614                                 "%s: successful count(%d), timeout(%d)\n",
3615                                 ioc->name, __func__, count, timeout));
3616                         return 0;
3617                 }
3618                 if (sleep_flag == CAN_SLEEP)
3619                         usleep_range(1000, 1500);
3620                 else
3621                         udelay(500);
3622                 count++;
3623         } while (--cntdn);
3624
3625         pr_err(MPT3SAS_FMT
3626                 "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3627                 ioc->name, __func__, count, doorbell_reg);
3628         return -EFAULT;
3629 }
3630
3631 /**
3632  * _base_send_ioc_reset - send doorbell reset
3633  * @ioc: per adapter object
3634  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3635  * @timeout: timeout in seconds
3636  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3637  *
3638  * Returns 0 for success, non-zero for failure.
3639  */
3640 static int
3641 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3642         int sleep_flag)
3643 {
3644         u32 ioc_state;
3645         int r = 0;
3646
3647         if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3648                 pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
3649                     ioc->name, __func__);
3650                 return -EFAULT;
3651         }
3652
3653         if (!(ioc->facts.IOCCapabilities &
3654            MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3655                 return -EFAULT;
3656
3657         pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
3658
3659         writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3660             &ioc->chip->Doorbell);
3661         if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3662                 r = -EFAULT;
3663                 goto out;
3664         }
3665         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3666             timeout, sleep_flag);
3667         if (ioc_state) {
3668                 pr_err(MPT3SAS_FMT
3669                         "%s: failed going to ready state (ioc_state=0x%x)\n",
3670                         ioc->name, __func__, ioc_state);
3671                 r = -EFAULT;
3672                 goto out;
3673         }
3674  out:
3675         pr_info(MPT3SAS_FMT "message unit reset: %s\n",
3676             ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3677         return r;
3678 }
3679
3680 /**
3681  * _base_handshake_req_reply_wait - send request thru doorbell interface
3682  * @ioc: per adapter object
3683  * @request_bytes: request length
3684  * @request: pointer having request payload
3685  * @reply_bytes: reply length
3686  * @reply: pointer to reply payload
3687  * @timeout: timeout in seconds
3688  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3689  *
3690  * Returns 0 for success, non-zero for failure.
3691  */
3692 static int
3693 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3694         u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3695 {
3696         MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3697         int i;
3698         u8 failed;
3699         u16 dummy;
3700         __le32 *mfp;
3701
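        /*
         * Doorbell handshake overview: post the handshake function and the
         * request length in dwords, wait for the IOC to acknowledge, write
         * the request one dword at a time (each write acknowledged), then
         * read the reply back one 16-bit word at a time from the doorbell
         * register, clearing the interrupt status after every read.
         */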
3702         /* make sure doorbell is not in use */
3703         if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3704                 pr_err(MPT3SAS_FMT
3705                         "doorbell is in use (line=%d)\n",
3706                         ioc->name, __LINE__);
3707                 return -EFAULT;
3708         }
3709
3710         /* clear pending doorbell interrupts from previous state changes */
3711         if (readl(&ioc->chip->HostInterruptStatus) &
3712             MPI2_HIS_IOC2SYS_DB_STATUS)
3713                 writel(0, &ioc->chip->HostInterruptStatus);
3714
3715         /* send message to ioc */
3716         writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3717             ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3718             &ioc->chip->Doorbell);
3719
3720         if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3721                 pr_err(MPT3SAS_FMT
3722                         "doorbell handshake int failed (line=%d)\n",
3723                         ioc->name, __LINE__);
3724                 return -EFAULT;
3725         }
3726         writel(0, &ioc->chip->HostInterruptStatus);
3727
3728         if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3729                 pr_err(MPT3SAS_FMT
3730                         "doorbell handshake ack failed (line=%d)\n",
3731                         ioc->name, __LINE__);
3732                 return -EFAULT;
3733         }
3734
3735         /* send message 32-bits at a time */
3736         for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3737                 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3738                 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3739                         failed = 1;
3740         }
3741
3742         if (failed) {
3743                 pr_err(MPT3SAS_FMT
3744                         "doorbell handshake sending request failed (line=%d)\n",
3745                         ioc->name, __LINE__);
3746                 return -EFAULT;
3747         }
3748
3749         /* now wait for the reply */
3750         if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3751                 pr_err(MPT3SAS_FMT
3752                         "doorbell handshake int failed (line=%d)\n",
3753                         ioc->name, __LINE__);
3754                 return -EFAULT;
3755         }
3756
3757         /* read the first two 16-bit words; they give the total reply length */
3758         reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3759             & MPI2_DOORBELL_DATA_MASK);
3760         writel(0, &ioc->chip->HostInterruptStatus);
3761         if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3762                 pr_err(MPT3SAS_FMT
3763                         "doorbell handshake int failed (line=%d)\n",
3764                         ioc->name, __LINE__);
3765                 return -EFAULT;
3766         }
3767         reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3768             & MPI2_DOORBELL_DATA_MASK);
3769         writel(0, &ioc->chip->HostInterruptStatus);
3770
3771         for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3772                 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3773                         pr_err(MPT3SAS_FMT
3774                                 "doorbell handshake int failed (line=%d)\n",
3775                                 ioc->name, __LINE__);
3776                         return -EFAULT;
3777                 }
3778                 if (i >=  reply_bytes/2) /* overflow case */
3779                         dummy = readl(&ioc->chip->Doorbell);
3780                 else
3781                         reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3782                             & MPI2_DOORBELL_DATA_MASK);
3783                 writel(0, &ioc->chip->HostInterruptStatus);
3784         }
3785
3786         _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3787         if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3788                 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3789                         "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
3790         }
3791         writel(0, &ioc->chip->HostInterruptStatus);
3792
3793         if (ioc->logging_level & MPT_DEBUG_INIT) {
3794                 mfp = (__le32 *)reply;
3795                 pr_info("\toffset:data\n");
3796                 for (i = 0; i < reply_bytes/4; i++)
3797                         pr_info("\t[0x%02x]:%08x\n", i*4,
3798                             le32_to_cpu(mfp[i]));
3799         }
3800         return 0;
3801 }
3802
3803 /**
3804  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
3805  * @ioc: per adapter object
3806  * @mpi_reply: the reply payload from FW
3807  * @mpi_request: the request payload sent to FW
3808  *
3809  * The SAS IO Unit Control Request message allows the host to perform low-level
3810  * operations such as resets on the PHYs of the IO Unit. It also allows the host
3811  * to obtain the IOC-assigned device handle for a device, given other
3812  * identifying information about the device, and to remove IOC resources
3813  * associated with the device.
3814  *
3815  * Returns 0 for success, non-zero for failure.
3816  */
3817 int
3818 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
3819         Mpi2SasIoUnitControlReply_t *mpi_reply,
3820         Mpi2SasIoUnitControlRequest_t *mpi_request)
3821 {
3822         u16 smid;
3823         u32 ioc_state;
3824         unsigned long timeleft;
3825         bool issue_reset = false;
3826         int rc;
3827         void *request;
3828         u16 wait_state_count;
3829
3830         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3831             __func__));
3832
3833         mutex_lock(&ioc->base_cmds.mutex);
3834
3835         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3836                 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3837                     ioc->name, __func__);
3838                 rc = -EAGAIN;
3839                 goto out;
3840         }
3841
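        /*
         * Poll once per second, for at most ten seconds, for the IOC to
         * reach the OPERATIONAL state before sending the request.
         */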
3842         wait_state_count = 0;
3843         ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3844         while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3845                 if (wait_state_count++ == 10) {
3846                         pr_err(MPT3SAS_FMT
3847                             "%s: failed due to ioc not operational\n",
3848                             ioc->name, __func__);
3849                         rc = -EFAULT;
3850                         goto out;
3851                 }
3852                 ssleep(1);
3853                 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3854                 pr_info(MPT3SAS_FMT
3855                         "%s: waiting for operational state(count=%d)\n",
3856                         ioc->name, __func__, wait_state_count);
3857         }
3858
3859         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3860         if (!smid) {
3861                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3862                     ioc->name, __func__);
3863                 rc = -EAGAIN;
3864                 goto out;
3865         }
3866
3867         rc = 0;
3868         ioc->base_cmds.status = MPT3_CMD_PENDING;
3869         request = mpt3sas_base_get_msg_frame(ioc, smid);
3870         ioc->base_cmds.smid = smid;
3871         memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3872         if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3873             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3874                 ioc->ioc_link_reset_in_progress = 1;
3875         init_completion(&ioc->base_cmds.done);
3876         mpt3sas_base_put_smid_default(ioc, smid);
3877         timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3878             msecs_to_jiffies(10000));
3879         if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3880             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3881             ioc->ioc_link_reset_in_progress)
3882                 ioc->ioc_link_reset_in_progress = 0;
3883         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3884                 pr_err(MPT3SAS_FMT "%s: timeout\n",
3885                     ioc->name, __func__);
3886                 _debug_dump_mf(mpi_request,
3887                     sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3888                 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
3889                         issue_reset = true;
3890                 goto issue_host_reset;
3891         }
3892         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
3893                 memcpy(mpi_reply, ioc->base_cmds.reply,
3894                     sizeof(Mpi2SasIoUnitControlReply_t));
3895         else
3896                 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3897         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3898         goto out;
3899
3900  issue_host_reset:
3901         if (issue_reset)
3902                 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3903                     FORCE_BIG_HAMMER);
3904         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3905         rc = -EFAULT;
3906  out:
3907         mutex_unlock(&ioc->base_cmds.mutex);
3908         return rc;
3909 }
3910
3911 /**
3912  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
3913  * @ioc: per adapter object
3914  * @mpi_reply: the reply payload from FW
3915  * @mpi_request: the request payload sent to FW
3916  *
3917  * The SCSI Enclosure Processor request message causes the IOC to
3918  * communicate with SES devices to control LED status signals.
3919  *
3920  * Returns 0 for success, non-zero for failure.
3921  */
3922 int
3923 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
3924         Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3925 {
3926         u16 smid;
3927         u32 ioc_state;
3928         unsigned long timeleft;
3929         bool issue_reset = false;
3930         int rc;
3931         void *request;
3932         u16 wait_state_count;
3933
3934         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3935             __func__));
3936
3937         mutex_lock(&ioc->base_cmds.mutex);
3938
3939         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3940                 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3941                     ioc->name, __func__);
3942                 rc = -EAGAIN;
3943                 goto out;
3944         }
3945
3946         wait_state_count = 0;
3947         ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3948         while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3949                 if (wait_state_count++ == 10) {
3950                         pr_err(MPT3SAS_FMT
3951                             "%s: failed due to ioc not operational\n",
3952                             ioc->name, __func__);
3953                         rc = -EFAULT;
3954                         goto out;
3955                 }
3956                 ssleep(1);
3957                 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3958                 pr_info(MPT3SAS_FMT
3959                         "%s: waiting for operational state(count=%d)\n",
3960                         ioc->name,
3961                     __func__, wait_state_count);
3962         }
3963
3964         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3965         if (!smid) {
3966                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3967                     ioc->name, __func__);
3968                 rc = -EAGAIN;
3969                 goto out;
3970         }
3971
3972         rc = 0;
3973         ioc->base_cmds.status = MPT3_CMD_PENDING;
3974         request = mpt3sas_base_get_msg_frame(ioc, smid);
3975         ioc->base_cmds.smid = smid;
3976         memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
3977         init_completion(&ioc->base_cmds.done);
3978         mpt3sas_base_put_smid_default(ioc, smid);
3979         timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3980             msecs_to_jiffies(10000));
3981         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3982                 pr_err(MPT3SAS_FMT "%s: timeout\n",
3983                     ioc->name, __func__);
3984                 _debug_dump_mf(mpi_request,
3985                     sizeof(Mpi2SepRequest_t)/4);
3986                 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
3987                         issue_reset = true;
3988                 goto issue_host_reset;
3989         }
3990         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
3991                 memcpy(mpi_reply, ioc->base_cmds.reply,
3992                     sizeof(Mpi2SepReply_t));
3993         else
3994                 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3995         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3996         goto out;
3997
3998  issue_host_reset:
3999         if (issue_reset)
4000                 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
4001                     FORCE_BIG_HAMMER);
4002         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4003         rc = -EFAULT;
4004  out:
4005         mutex_unlock(&ioc->base_cmds.mutex);
4006         return rc;
4007 }
4008
4009 /**
4010  * _base_get_port_facts - obtain port facts reply and save in ioc
4011  * @ioc: per adapter object
4012  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4013  *
4014  * Returns 0 for success, non-zero for failure.
4015  */
4016 static int
4017 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
4018 {
4019         Mpi2PortFactsRequest_t mpi_request;
4020         Mpi2PortFactsReply_t mpi_reply;
4021         struct mpt3sas_port_facts *pfacts;
4022         int mpi_reply_sz, mpi_request_sz, r;
4023
4024         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4025             __func__));
4026
4027         mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
4028         mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
4029         memset(&mpi_request, 0, mpi_request_sz);
4030         mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
4031         mpi_request.PortNumber = port;
4032         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4033             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
4034
4035         if (r != 0) {
4036                 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4037                     ioc->name, __func__, r);
4038                 return r;
4039         }
4040
4041         pfacts = &ioc->pfacts[port];
4042         memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
4043         pfacts->PortNumber = mpi_reply.PortNumber;
4044         pfacts->VP_ID = mpi_reply.VP_ID;
4045         pfacts->VF_ID = mpi_reply.VF_ID;
4046         pfacts->MaxPostedCmdBuffers =
4047             le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
4048
4049         return 0;
4050 }
4051
4052 /**
4053  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
4054  * @ioc: per adapter object
4055  * @timeout: timeout in seconds
4056  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4057  *
4058  * Returns 0 for success, non-zero for failure.
4059  */
4060 static int
4061 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
4062         int sleep_flag)
4063 {
4064         u32 ioc_state;
4065         int rc;
4066
4067         dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
4068             __func__));
4069
4070         if (ioc->pci_error_recovery) {
4071                 dfailprintk(ioc, printk(MPT3SAS_FMT
4072                     "%s: host in pci error recovery\n", ioc->name, __func__));
4073                 return -EFAULT;
4074         }
4075
4076         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4077         dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4078             ioc->name, __func__, ioc_state));
4079
4080         if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
4081             (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4082                 return 0;
4083
4084         if (ioc_state & MPI2_DOORBELL_USED) {
4085                 dhsprintk(ioc, printk(MPT3SAS_FMT
4086                     "unexpected doorbell active!\n", ioc->name));
4087                 goto issue_diag_reset;
4088         }
4089
4090         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4091                 mpt3sas_base_fault_info(ioc, ioc_state &
4092                     MPI2_DOORBELL_DATA_MASK);
4093                 goto issue_diag_reset;
4094         }
4095
4096         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
4097             timeout, sleep_flag);
4098         if (ioc_state) {
4099                 dfailprintk(ioc, printk(MPT3SAS_FMT
4100                     "%s: failed going to ready state (ioc_state=0x%x)\n",
4101                     ioc->name, __func__, ioc_state));
4102                 return -EFAULT;
4103         }
4104
4105  issue_diag_reset:
4106         rc = _base_diag_reset(ioc, sleep_flag);
4107         return rc;
4108 }
4109
4110 /**
4111  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
4112  * @ioc: per adapter object
4113  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4114  *
4115  * Returns 0 for success, non-zero for failure.
4116  */
4117 static int
4118 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4119 {
4120         Mpi2IOCFactsRequest_t mpi_request;
4121         Mpi2IOCFactsReply_t mpi_reply;
4122         struct mpt3sas_facts *facts;
4123         int mpi_reply_sz, mpi_request_sz, r;
4124
4125         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4126             __func__));
4127
4128         r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
4129         if (r) {
4130                 dfailprintk(ioc, printk(MPT3SAS_FMT
4131                     "%s: failed getting to correct state\n",
4132                     ioc->name, __func__));
4133                 return r;
4134         }
4135         mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
4136         mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
4137         memset(&mpi_request, 0, mpi_request_sz);
4138         mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
4139         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4140             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
4141
4142         if (r != 0) {
4143                 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4144                     ioc->name, __func__, r);
4145                 return r;
4146         }
4147
4148         facts = &ioc->facts;
4149         memset(facts, 0, sizeof(struct mpt3sas_facts));
4150         facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
4151         facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
4152         facts->VP_ID = mpi_reply.VP_ID;
4153         facts->VF_ID = mpi_reply.VF_ID;
4154         facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
4155         facts->MaxChainDepth = mpi_reply.MaxChainDepth;
4156         facts->WhoInit = mpi_reply.WhoInit;
4157         facts->NumberOfPorts = mpi_reply.NumberOfPorts;
4158         facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
4159         facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
4160         facts->MaxReplyDescriptorPostQueueDepth =
4161             le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
4162         facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
4163         facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
4164         if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
4165                 ioc->ir_firmware = 1;
4166         if ((facts->IOCCapabilities &
4167               MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
4168                 ioc->rdpq_array_capable = 1;
4169         facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
4170         facts->IOCRequestFrameSize =
4171             le16_to_cpu(mpi_reply.IOCRequestFrameSize);
4172         facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
4173         facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
4174         ioc->shost->max_id = -1;
4175         facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
4176         facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
4177         facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
4178         facts->HighPriorityCredit =
4179             le16_to_cpu(mpi_reply.HighPriorityCredit);
4180         facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
4181         facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
4182
4183         dinitprintk(ioc, pr_info(MPT3SAS_FMT
4184                 "hba queue depth(%d), max chains per io(%d)\n",
4185                 ioc->name, facts->RequestCredit,
4186             facts->MaxChainDepth));
4187         dinitprintk(ioc, pr_info(MPT3SAS_FMT
4188                 "request frame size(%d), reply frame size(%d)\n", ioc->name,
4189             facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
4190         return 0;
4191 }
4192
4193 /**
4194  * _base_send_ioc_init - send ioc_init to firmware
4195  * @ioc: per adapter object
4196  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4197  *
4198  * Returns 0 for success, non-zero for failure.
4199  */
4200 static int
4201 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4202 {
4203         Mpi2IOCInitRequest_t mpi_request;
4204         Mpi2IOCInitReply_t mpi_reply;
4205         int i, r = 0;
4206         struct timeval current_time;
4207         u16 ioc_status;
4208         u32 reply_post_free_array_sz = 0;
4209         Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
4210         dma_addr_t reply_post_free_array_dma;
4211
4212         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4213             __func__));
4214
4215         memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
4216         mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
4217         mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
4218         mpi_request.VF_ID = 0; /* TODO */
4219         mpi_request.VP_ID = 0;
4220         mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
4221         mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
4222
4223         if (_base_is_controller_msix_enabled(ioc))
4224                 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
4225         mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
4226         mpi_request.ReplyDescriptorPostQueueDepth =
4227             cpu_to_le16(ioc->reply_post_queue_depth);
4228         mpi_request.ReplyFreeQueueDepth =
4229             cpu_to_le16(ioc->reply_free_queue_depth);
4230
4231         mpi_request.SenseBufferAddressHigh =
4232             cpu_to_le32((u64)ioc->sense_dma >> 32);
4233         mpi_request.SystemReplyAddressHigh =
4234             cpu_to_le32((u64)ioc->reply_dma >> 32);
4235         mpi_request.SystemRequestFrameBaseAddress =
4236             cpu_to_le64((u64)ioc->request_dma);
4237         mpi_request.ReplyFreeQueueAddress =
4238             cpu_to_le64((u64)ioc->reply_free_dma);
4239
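             /*
              * Reply Descriptor Post Queue addressing: when rdpq_array_enable
              * is set, each reply queue has its own DMA allocation, so an
              * array of per-queue base addresses is built and its address is
              * handed to the IOC with the RDPQ_ARRAY_MODE message flag set.
              * Otherwise all queues share one contiguous region and only that
              * region's base address is passed.
              */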
4240         if (ioc->rdpq_array_enable) {
4241                 reply_post_free_array_sz = ioc->reply_queue_count *
4242                     sizeof(Mpi2IOCInitRDPQArrayEntry);
4243                 reply_post_free_array = pci_alloc_consistent(ioc->pdev,
4244                         reply_post_free_array_sz, &reply_post_free_array_dma);
4245                 if (!reply_post_free_array) {
4246                         pr_err(MPT3SAS_FMT
4247                         "reply_post_free_array: pci_alloc_consistent failed\n",
4248                         ioc->name);
4249                         r = -ENOMEM;
4250                         goto out;
4251                 }
4252                 memset(reply_post_free_array, 0, reply_post_free_array_sz);
4253                 for (i = 0; i < ioc->reply_queue_count; i++)
4254                         reply_post_free_array[i].RDPQBaseAddress =
4255                             cpu_to_le64(
4256                                 (u64)ioc->reply_post[i].reply_post_free_dma);
4257                 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
4258                 mpi_request.ReplyDescriptorPostQueueAddress =
4259                     cpu_to_le64((u64)reply_post_free_array_dma);
4260         } else {
4261                 mpi_request.ReplyDescriptorPostQueueAddress =
4262                     cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
4263         }
4264
4265         /* This time stamp specifies the number of milliseconds
4266          * since the epoch ~ midnight January 1, 1970.
4267          */
4268         do_gettimeofday(&current_time);
4269         mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
4270             (current_time.tv_usec / 1000));
4271
4272         if (ioc->logging_level & MPT_DEBUG_INIT) {
4273                 __le32 *mfp;
4274                 int i;
4275
4276                 mfp = (__le32 *)&mpi_request;
4277                 pr_info("\toffset:data\n");
4278                 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
4279                         pr_info("\t[0x%02x]:%08x\n", i*4,
4280                             le32_to_cpu(mfp[i]));
4281         }
4282
4283         r = _base_handshake_req_reply_wait(ioc,
4284             sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
4285             sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
4286             sleep_flag);
4287
4288         if (r != 0) {
4289                 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4290                     ioc->name, __func__, r);
4291                 goto out;
4292         }
4293
4294         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
4295         if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
4296             mpi_reply.IOCLogInfo) {
4297                 pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
4298                 r = -EIO;
4299         }
4300
4301 out:
4302         if (reply_post_free_array)
4303                 pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
4304                                     reply_post_free_array,
4305                                     reply_post_free_array_dma);
4306         return r;
4307 }
4308
4309 /**
4310  * mpt3sas_port_enable_done - command completion routine for port enable
4311  * @ioc: per adapter object
4312  * @smid: system request message index
4313  * @msix_index: MSIX table index supplied by the OS
4314  * @reply: reply message frame(lower 32bit addr)
4315  *
4316  * Return 1 meaning mf should be freed from _base_interrupt
4317  *        0 means the mf is freed from this function.
4318  */
4319 u8
4320 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4321         u32 reply)
4322 {
4323         MPI2DefaultReply_t *mpi_reply;
4324         u16 ioc_status;
4325
4326         if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
4327                 return 1;
4328
4329         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4330         if (!mpi_reply)
4331                 return 1;
4332
4333         if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
4334                 return 1;
4335
4336         ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
4337         ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
4338         ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
4339         memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
4340         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4341         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
4342                 ioc->port_enable_failed = 1;
4343
4344         if (ioc->is_driver_loading) {
4345                 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4346                         mpt3sas_port_enable_complete(ioc);
4347                         return 1;
4348                 } else {
4349                         ioc->start_scan_failed = ioc_status;
4350                         ioc->start_scan = 0;
4351                         return 1;
4352                 }
4353         }
4354         complete(&ioc->port_enable_cmds.done);
4355         return 1;
4356 }
4357
4358 /**
4359  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
4360  * @ioc: per adapter object
4361  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4362  *
4363  * Returns 0 for success, non-zero for failure.
4364  */
4365 static int
4366 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4367 {
4368         Mpi2PortEnableRequest_t *mpi_request;
4369         Mpi2PortEnableReply_t *mpi_reply;
4370         unsigned long timeleft;
4371         int r = 0;
4372         u16 smid;
4373         u16 ioc_status;
4374
4375         pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4376
4377         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4378                 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4379                     ioc->name, __func__);
4380                 return -EAGAIN;
4381         }
4382
4383         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4384         if (!smid) {
4385                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4386                     ioc->name, __func__);
4387                 return -EAGAIN;
4388         }
4389
4390         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4391         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4392         ioc->port_enable_cmds.smid = smid;
4393         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4394         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4395
4396         init_completion(&ioc->port_enable_cmds.done);
4397         mpt3sas_base_put_smid_default(ioc, smid);
4398         timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
4399             300*HZ);
4400         if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
4401                 pr_err(MPT3SAS_FMT "%s: timeout\n",
4402                     ioc->name, __func__);
4403                 _debug_dump_mf(mpi_request,
4404                     sizeof(Mpi2PortEnableRequest_t)/4);
4405                 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
4406                         r = -EFAULT;
4407                 else
4408                         r = -ETIME;
4409                 goto out;
4410         }
4411
4412         mpi_reply = ioc->port_enable_cmds.reply;
4413         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4414         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4415                 pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
4416                     ioc->name, __func__, ioc_status);
4417                 r = -EFAULT;
4418                 goto out;
4419         }
4420
4421  out:
4422         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4423         pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
4424             "SUCCESS" : "FAILED"));
4425         return r;
4426 }
4427
4428 /**
4429  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
4430  * @ioc: per adapter object
4431  *
4432  * Returns 0 for success, non-zero for failure.
4433  */
4434 int
4435 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
4436 {
4437         Mpi2PortEnableRequest_t *mpi_request;
4438         u16 smid;
4439
4440         pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4441
4442         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4443                 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4444                     ioc->name, __func__);
4445                 return -EAGAIN;
4446         }
4447
4448         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4449         if (!smid) {
4450                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4451                     ioc->name, __func__);
4452                 return -EAGAIN;
4453         }
4454
4455         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4456         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4457         ioc->port_enable_cmds.smid = smid;
4458         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4459         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4460
4461         mpt3sas_base_put_smid_default(ioc, smid);
4462         return 0;
4463 }
4464
4465 /**
4466  * _base_determine_wait_on_discovery - decide whether to wait for discovery
4467  * @ioc: per adapter object
4468  *
4469  * Decide whether to wait on discovery to complete. Used to either
4470  * locate boot device, or report volumes ahead of physical devices.
4471  *
4472  * Returns 1 for wait, 0 for don't wait
4473  */
4474 static int
4475 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
4476 {
4477         /* We wait for discovery to complete if IR firmware is loaded.
4478          * The sas topology events arrive before PD events, so we need time to
4479          * turn on the bit in ioc->pd_handles to indicate a PD.
4480          * Also, it may be required to report Volumes ahead of physical
4481          * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
4482          */
4483         if (ioc->ir_firmware)
4484                 return 1;
4485
4486         /* if no Bios, then we don't need to wait */
4487         if (!ioc->bios_pg3.BiosVersion)
4488                 return 0;
4489
4490         /* The Bios is present, so we drop down here.
4491          *
4492          * If there are any entries in the Bios Page 2, then we wait
4493          * for discovery to complete.
4494          */
4495
4496         /* Current Boot Device */
4497         if ((ioc->bios_pg2.CurrentBootDeviceForm &
4498             MPI2_BIOSPAGE2_FORM_MASK) ==
4499             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4500         /* Request Boot Device */
4501            (ioc->bios_pg2.ReqBootDeviceForm &
4502             MPI2_BIOSPAGE2_FORM_MASK) ==
4503             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4504         /* Alternate Request Boot Device */
4505            (ioc->bios_pg2.ReqAltBootDeviceForm &
4506             MPI2_BIOSPAGE2_FORM_MASK) ==
4507             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
4508                 return 0;
4509
4510         return 1;
4511 }
4512
4513 /**
4514  * _base_unmask_events - turn on notification for this event
4515  * @ioc: per adapter object
4516  * @event: firmware event
4517  *
4518  * The mask is stored in ioc->event_masks.
4519  */
4520 static void
4521 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
4522 {
4523         u32 desired_event;
4524
4525         if (event >= 128)
4526                 return;
4527
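             /* ioc->event_masks[] holds four 32-bit words, one bit per event
              * code; a set bit masks the event, so clearing it enables
              * notification for that event
              */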
4528         desired_event = (1 << (event % 32));
4529
4530         if (event < 32)
4531                 ioc->event_masks[0] &= ~desired_event;
4532         else if (event < 64)
4533                 ioc->event_masks[1] &= ~desired_event;
4534         else if (event < 96)
4535                 ioc->event_masks[2] &= ~desired_event;
4536         else if (event < 128)
4537                 ioc->event_masks[3] &= ~desired_event;
4538 }
4539
4540 /**
4541  * _base_event_notification - send event notification
4542  * @ioc: per adapter object
4543  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4544  *
4545  * Returns 0 for success, non-zero for failure.
4546  */
4547 static int
4548 _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4549 {
4550         Mpi2EventNotificationRequest_t *mpi_request;
4551         unsigned long timeleft;
4552         u16 smid;
4553         int r = 0;
4554         int i;
4555
4556         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4557             __func__));
4558
4559         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4560                 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4561                     ioc->name, __func__);
4562                 return -EAGAIN;
4563         }
4564
4565         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4566         if (!smid) {
4567                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4568                     ioc->name, __func__);
4569                 return -EAGAIN;
4570         }
4571         ioc->base_cmds.status = MPT3_CMD_PENDING;
4572         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4573         ioc->base_cmds.smid = smid;
4574         memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
4575         mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
4576         mpi_request->VF_ID = 0; /* TODO */
4577         mpi_request->VP_ID = 0;
4578         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4579                 mpi_request->EventMasks[i] =
4580                     cpu_to_le32(ioc->event_masks[i]);
4581         init_completion(&ioc->base_cmds.done);
4582         mpt3sas_base_put_smid_default(ioc, smid);
4583         timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
4584         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4585                 pr_err(MPT3SAS_FMT "%s: timeout\n",
4586                     ioc->name, __func__);
4587                 _debug_dump_mf(mpi_request,
4588                     sizeof(Mpi2EventNotificationRequest_t)/4);
4589                 if (ioc->base_cmds.status & MPT3_CMD_RESET)
4590                         r = -EFAULT;
4591                 else
4592                         r = -ETIME;
4593         } else
4594                 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
4595                     ioc->name, __func__));
4596         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4597         return r;
4598 }
4599
4600 /**
4601  * mpt3sas_base_validate_event_type - validating event types
4602  * @ioc: per adapter object
4603  * @event: firmware event
4604  *
4605  * This will turn on firmware event notification when an application
4606  * asks for that event. Events that are already enabled are left untouched.
4607  */
4608 void
4609 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4610 {
4611         int i, j;
4612         u32 event_mask, desired_event;
4613         u8 send_update_to_fw;
4614
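             /* event_type[] has a bit set for every event the application
              * wants; for each such event that is still masked in
              * ioc->event_masks, clear the mask bit and remember to push the
              * updated masks to firmware
              */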
4615         for (i = 0, send_update_to_fw = 0; i <
4616             MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4617                 event_mask = ~event_type[i];
4618                 desired_event = 1;
4619                 for (j = 0; j < 32; j++) {
4620                         if (!(event_mask & desired_event) &&
4621                             (ioc->event_masks[i] & desired_event)) {
4622                                 ioc->event_masks[i] &= ~desired_event;
4623                                 send_update_to_fw = 1;
4624                         }
4625                         desired_event = (desired_event << 1);
4626                 }
4627         }
4628
4629         if (!send_update_to_fw)
4630                 return;
4631
4632         mutex_lock(&ioc->base_cmds.mutex);
4633         _base_event_notification(ioc, CAN_SLEEP);
4634         mutex_unlock(&ioc->base_cmds.mutex);
4635 }
4636
4637 /**
4638  * _base_diag_reset - the "big hammer" start of day reset
4639  * @ioc: per adapter object
4640  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4641  *
4642  * Returns 0 for success, non-zero for failure.
4643  */
4644 static int
4645 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4646 {
4647         u32 host_diagnostic;
4648         u32 ioc_state;
4649         u32 count;
4650         u32 hcb_size;
4651
4652         pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
4653
4654         drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
4655             ioc->name));
4656
4657         count = 0;
4658         do {
4659                 /* Write magic sequence to WriteSequence register
4660                  * Loop until in diagnostic mode
4661                  */
4662                 drsprintk(ioc, pr_info(MPT3SAS_FMT
4663                         "write magic sequence\n", ioc->name));
4664                 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4665                 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4666                 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4667                 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4668                 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4669                 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4670                 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4671
4672                 /* wait 100 msec */
4673                 if (sleep_flag == CAN_SLEEP)
4674                         msleep(100);
4675                 else
4676                         mdelay(100);
4677
4678                 if (count++ > 20)
4679                         goto out;
4680
4681                 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4682                 drsprintk(ioc, pr_info(MPT3SAS_FMT
4683                         "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
4684                     ioc->name, count, host_diagnostic));
4685
4686         } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4687
4688         hcb_size = readl(&ioc->chip->HCBSize);
4689
4690         drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
4691             ioc->name));
4692         writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4693              &ioc->chip->HostDiagnostic);
4694
4695         /*This delay allows the chip PCIe hardware time to finish reset tasks*/
4696         if (sleep_flag == CAN_SLEEP)
4697                 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4698         else
4699                 mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4700
4701         /* Approximately 300 second max wait */
4702         for (count = 0; count < (300000000 /
4703                 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
4704
4705                 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4706
4707                 if (host_diagnostic == 0xFFFFFFFF)
4708                         goto out;
4709                 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4710                         break;
4711
4712                 /* Wait to pass the second read delay window */
4713                 if (sleep_flag == CAN_SLEEP)
4714                         msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4715                                                                 / 1000);
4716                 else
4717                         mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4718                                                                 / 1000);
4719         }
4720
4721         if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4722
4723                 drsprintk(ioc, pr_info(MPT3SAS_FMT
4724                 "restart the adapter assuming the HCB Address points to good F/W\n",
4725                     ioc->name));
4726                 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4727                 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4728                 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4729
4730                 drsprintk(ioc, pr_info(MPT3SAS_FMT
4731                     "re-enable the HCDW\n", ioc->name));
4732                 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4733                     &ioc->chip->HCBSize);
4734         }
4735
4736         drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
4737             ioc->name));
4738         writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4739             &ioc->chip->HostDiagnostic);
4740
4741         drsprintk(ioc, pr_info(MPT3SAS_FMT
4742                 "disable writes to the diagnostic register\n", ioc->name));
4743         writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4744
4745         drsprintk(ioc, pr_info(MPT3SAS_FMT
4746                 "Wait for FW to go to the READY state\n", ioc->name));
4747         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
4748             sleep_flag);
4749         if (ioc_state) {
4750                 pr_err(MPT3SAS_FMT
4751                         "%s: failed going to ready state (ioc_state=0x%x)\n",
4752                         ioc->name, __func__, ioc_state);
4753                 goto out;
4754         }
4755
4756         pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
4757         return 0;
4758
4759  out:
4760         pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
4761         return -EFAULT;
4762 }
4763
4764 /**
4765  * _base_make_ioc_ready - put controller in READY state
4766  * @ioc: per adapter object
4767  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4768  * @type: FORCE_BIG_HAMMER or SOFT_RESET
4769  *
4770  * Returns 0 for success, non-zero for failure.
4771  */
4772 static int
4773 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4774         enum reset_type type)
4775 {
4776         u32 ioc_state;
4777         int rc;
4778         int count;
4779
4780         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4781             __func__));
4782
4783         if (ioc->pci_error_recovery)
4784                 return 0;
4785
4786         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4787         dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4788             ioc->name, __func__, ioc_state));
4789
4790         /* if in RESET state, it should move to READY state shortly */
4791         count = 0;
4792         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
4793                 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
4794                     MPI2_IOC_STATE_READY) {
4795                         if (count++ == 10) {
4796                                 pr_err(MPT3SAS_FMT
4797                                         "%s: failed going to ready state (ioc_state=0x%x)\n",
4798                                     ioc->name, __func__, ioc_state);
4799                                 return -EFAULT;
4800                         }
4801                         if (sleep_flag == CAN_SLEEP)
4802                                 ssleep(1);
4803                         else
4804                                 mdelay(1000);
4805                         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4806                 }
4807         }
4808
4809         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4810                 return 0;
4811
4812         if (ioc_state & MPI2_DOORBELL_USED) {
4813                 dhsprintk(ioc, pr_info(MPT3SAS_FMT
4814                         "unexpected doorbell active!\n",
4815                         ioc->name));
4816                 goto issue_diag_reset;
4817         }
4818
4819         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4820                 mpt3sas_base_fault_info(ioc, ioc_state &
4821                     MPI2_DOORBELL_DATA_MASK);
4822                 goto issue_diag_reset;
4823         }
4824
4825         if (type == FORCE_BIG_HAMMER)
4826                 goto issue_diag_reset;
4827
4828         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4829                 if (!(_base_send_ioc_reset(ioc,
4830                     MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4831                         return 0;
4832                 }
4833
4834  issue_diag_reset:
4835         rc = _base_diag_reset(ioc, CAN_SLEEP);
4836         return rc;
4837 }
4838
4839 /**
4840  * _base_make_ioc_operational - put controller in OPERATIONAL state
4841  * @ioc: per adapter object
4842  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4843  *
4844  * Returns 0 for success, non-zero for failure.
4845  */
4846 static int
4847 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4848 {
4849         int r, i;
4850         unsigned long   flags;
4851         u32 reply_address;
4852         u16 smid;
4853         struct _tr_list *delayed_tr, *delayed_tr_next;
4854         u8 hide_flag;
4855         struct adapter_reply_queue *reply_q;
4856         long reply_post_free;
4857         u32 reply_post_free_sz, index = 0;
4858
4859         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4860             __func__));
4861
4862         /* clean the delayed target reset list */
4863         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4864             &ioc->delayed_tr_list, list) {
4865                 list_del(&delayed_tr->list);
4866                 kfree(delayed_tr);
4867         }
4868
4869
4870         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4871             &ioc->delayed_tr_volume_list, list) {
4872                 list_del(&delayed_tr->list);
4873                 kfree(delayed_tr);
4874         }
4875
4876         /* initialize the scsi lookup free list */
4877         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4878         INIT_LIST_HEAD(&ioc->free_list);
4879         smid = 1;
4880         for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4881                 INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4882                 ioc->scsi_lookup[i].cb_idx = 0xFF;
4883                 ioc->scsi_lookup[i].smid = smid;
4884                 ioc->scsi_lookup[i].scmd = NULL;
4885                 ioc->scsi_lookup[i].direct_io = 0;
4886                 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4887                     &ioc->free_list);
4888         }
4889
4890         /* hi-priority queue */
4891         INIT_LIST_HEAD(&ioc->hpr_free_list);
4892         smid = ioc->hi_priority_smid;
4893         for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4894                 ioc->hpr_lookup[i].cb_idx = 0xFF;
4895                 ioc->hpr_lookup[i].smid = smid;
4896                 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4897                     &ioc->hpr_free_list);
4898         }
4899
4900         /* internal queue */
4901         INIT_LIST_HEAD(&ioc->internal_free_list);
4902         smid = ioc->internal_smid;
4903         for (i = 0; i < ioc->internal_depth; i++, smid++) {
4904                 ioc->internal_lookup[i].cb_idx = 0xFF;
4905                 ioc->internal_lookup[i].smid = smid;
4906                 list_add_tail(&ioc->internal_lookup[i].tracker_list,
4907                     &ioc->internal_free_list);
4908         }
4909
4910         /* chain pool */
4911         INIT_LIST_HEAD(&ioc->free_chain_list);
4912         for (i = 0; i < ioc->chain_depth; i++)
4913                 list_add_tail(&ioc->chain_lookup[i].tracker_list,
4914                     &ioc->free_chain_list);
4915
4916         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4917
4918         /* initialize Reply Free Queue */
4919         for (i = 0, reply_address = (u32)ioc->reply_dma ;
4920             i < ioc->reply_free_queue_depth ; i++, reply_address +=
4921             ioc->reply_sz)
4922                 ioc->reply_free[i] = cpu_to_le32(reply_address);
4923
4924         /* initialize reply queues */
4925         if (ioc->is_driver_loading)
4926                 _base_assign_reply_queues(ioc);
4927
4928         /* initialize Reply Post Free Queue */
4929         reply_post_free_sz = ioc->reply_post_queue_depth *
4930             sizeof(Mpi2DefaultReplyDescriptor_t);
4931         reply_post_free = (long)ioc->reply_post[index].reply_post_free;
4932         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4933                 reply_q->reply_post_host_index = 0;
4934                 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4935                     reply_post_free;
4936                 for (i = 0; i < ioc->reply_post_queue_depth; i++)
4937                         reply_q->reply_post_free[i].Words =
4938                             cpu_to_le64(ULLONG_MAX);
4939                 if (!_base_is_controller_msix_enabled(ioc))
4940                         goto skip_init_reply_post_free_queue;
4941                 /*
4942                  * If RDPQ is enabled, switch to the next allocation.
4943                  * Otherwise advance within the contiguous region.
4944                  */
4945                 if (ioc->rdpq_array_enable)
4946                         reply_post_free = (long)
4947                             ioc->reply_post[++index].reply_post_free;
4948                 else
4949                         reply_post_free += reply_post_free_sz;
4950         }
4951  skip_init_reply_post_free_queue:
4952
4953         r = _base_send_ioc_init(ioc, sleep_flag);
4954         if (r)
4955                 return r;
4956
4957         /* initialize reply free host index */
4958         ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4959         writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4960
4961         /* initialize reply post host index */
4962         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
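                     /* with 96 MSI-X vector support, the reply post host index
                      * is written through a bank of supplemental registers:
                      * msix_index / 8 selects the register and msix_index & 7
                      * is encoded into the value written
                      */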
4963                 if (ioc->msix96_vector)
4964                         writel((reply_q->msix_index & 7)<<
4965                            MPI2_RPHI_MSIX_INDEX_SHIFT,
4966                            ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
4967                 else
4968                         writel(reply_q->msix_index <<
4969                                 MPI2_RPHI_MSIX_INDEX_SHIFT,
4970                                 &ioc->chip->ReplyPostHostIndex);
4971
4972                 if (!_base_is_controller_msix_enabled(ioc))
4973                         goto skip_init_reply_post_host_index;
4974         }
4975
4976  skip_init_reply_post_host_index:
4977
4978         _base_unmask_interrupts(ioc);
4979         r = _base_event_notification(ioc, sleep_flag);
4980         if (r)
4981                 return r;
4982
4983         if (sleep_flag == CAN_SLEEP)
4984                 _base_static_config_pages(ioc);
4985
4986
4987         if (ioc->is_driver_loading) {
4988
4989                 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
4990                     == 0x80) {
4991                         hide_flag = (u8) (
4992                             le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
4993                             MFG_PAGE10_HIDE_SSDS_MASK);
4994                         if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
4995                                 ioc->mfg_pg10_hide_flag = hide_flag;
4996                 }
4997
4998                 ioc->wait_for_discovery_to_complete =
4999                     _base_determine_wait_on_discovery(ioc);
5000
5001                 return r; /* scan_start and scan_finished support */
5002         }
5003
5004         r = _base_send_port_enable(ioc, sleep_flag);
5005         if (r)
5006                 return r;
5007
5008         return r;
5009 }
5010
5011 /**
5012  * mpt3sas_base_free_resources - free controller resources
5013  * @ioc: per adapter object
5014  *
5015  * Return nothing.
5016  */
5017 void
5018 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
5019 {
5020         dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5021             __func__));
5022
5023         if (ioc->chip_phys && ioc->chip) {
5024                 _base_mask_interrupts(ioc);
5025                 ioc->shost_recovery = 1;
5026                 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
5027                 ioc->shost_recovery = 0;
5028         }
5029
5030         mpt3sas_base_unmap_resources(ioc);
5031         return;
5032 }
5033
5034 /**
5035  * mpt3sas_base_attach - attach controller instance
5036  * @ioc: per adapter object
5037  *
5038  * Returns 0 for success, non-zero for failure.
5039  */
5040 int
5041 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5042 {
5043         int r, i;
5044         int cpu_id, last_cpu_id = 0;
5045
5046         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5047             __func__));
5048
5049         /* setup cpu_msix_table */
5050         ioc->cpu_count = num_online_cpus();
5051         for_each_online_cpu(cpu_id)
5052                 last_cpu_id = cpu_id;
5053         ioc->cpu_msix_table_sz = last_cpu_id + 1;
5054         ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
5055         ioc->reply_queue_count = 1;
5056         if (!ioc->cpu_msix_table) {
5057                 dfailprintk(ioc, pr_info(MPT3SAS_FMT
5058                         "allocation for cpu_msix_table failed!!!\n",
5059                         ioc->name));
5060                 r = -ENOMEM;
5061                 goto out_free_resources;
5062         }
5063
5064         if (ioc->is_warpdrive) {
5065                 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
5066                     sizeof(resource_size_t *), GFP_KERNEL);
5067                 if (!ioc->reply_post_host_index) {
5068                         dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
5069                                 "for cpu_msix_table failed!!!\n", ioc->name));
5070                         r = -ENOMEM;
5071                         goto out_free_resources;
5072                 }
5073         }
5074
5075         ioc->rdpq_array_enable_assigned = 0;
5076         ioc->dma_mask = 0;
5077         r = mpt3sas_base_map_resources(ioc);
5078         if (r)
5079                 goto out_free_resources;
5080
5081         if (ioc->is_warpdrive) {
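                     /* WarpDrive: queue 0 uses the standard ReplyPostHostIndex
                      * register, while the remaining per-MSIX index registers
                      * sit at fixed offsets (0x4000 + 4 * (i - 1)) from the
                      * Doorbell register
                      */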
5082                 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
5083                     &ioc->chip->ReplyPostHostIndex;
5084
5085                 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
5086                         ioc->reply_post_host_index[i] =
5087                         (resource_size_t __iomem *)
5088                         ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
5089                         * 4)));
5090         }
5091
5092         pci_set_drvdata(ioc->pdev, ioc->shost);
5093         r = _base_get_ioc_facts(ioc, CAN_SLEEP);
5094         if (r)
5095                 goto out_free_resources;
5096
5097         switch (ioc->hba_mpi_version_belonged) {
5098         case MPI2_VERSION:
5099                 ioc->build_sg_scmd = &_base_build_sg_scmd;
5100                 ioc->build_sg = &_base_build_sg;
5101                 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
5102                 break;
5103         case MPI25_VERSION:
5104                 /*
5105                  * In SAS3.0,
5106                  * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
5107                  * Target Status - all require the IEEE formatted scatter gather
5108                  * elements.
5109                  */
5110                 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
5111                 ioc->build_sg = &_base_build_sg_ieee;
5112                 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
5113                 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
5114                 break;
5115         }
5116
5117         /*
5118          * These function pointers are for the other requests that don't
5119          * require IEEE scatter gather elements.
5120          *
5121          * For example Configuration Pages and SAS IOUNIT Control don't.
5122          */
5123         ioc->build_sg_mpi = &_base_build_sg;
5124         ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
5125
5126         r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
5127         if (r)
5128                 goto out_free_resources;
5129
5130         ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
5131             sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
5132         if (!ioc->pfacts) {
5133                 r = -ENOMEM;
5134                 goto out_free_resources;
5135         }
5136
5137         for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
5138                 r = _base_get_port_facts(ioc, i, CAN_SLEEP);
5139                 if (r)
5140                         goto out_free_resources;
5141         }
5142
5143         r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
5144         if (r)
5145                 goto out_free_resources;
5146
5147         init_waitqueue_head(&ioc->reset_wq);
5148
5149         /* allocate memory pd handle bitmask list */
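             /* one bit per possible device handle, rounded up to whole bytes */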
5150         ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
5151         if (ioc->facts.MaxDevHandle % 8)
5152                 ioc->pd_handles_sz++;
5153         ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
5154             GFP_KERNEL);
5155         if (!ioc->pd_handles) {
5156                 r = -ENOMEM;
5157                 goto out_free_resources;
5158         }
5159         ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
5160             GFP_KERNEL);
5161         if (!ioc->blocking_handles) {
5162                 r = -ENOMEM;
5163                 goto out_free_resources;
5164         }
5165
5166         ioc->fwfault_debug = mpt3sas_fwfault_debug;
5167
5168         /* base internal command bits */
5169         mutex_init(&ioc->base_cmds.mutex);
5170         ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5171         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5172
5173         /* port_enable command bits */
5174         ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5175         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5176
5177         /* transport internal command bits */
5178         ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5179         ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
5180         mutex_init(&ioc->transport_cmds.mutex);
5181
5182         /* scsih internal command bits */
5183         ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5184         ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
5185         mutex_init(&ioc->scsih_cmds.mutex);
5186
5187         /* task management internal command bits */
5188         ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5189         ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
5190         mutex_init(&ioc->tm_cmds.mutex);
5191
5192         /* config page internal command bits */
5193         ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5194         ioc->config_cmds.status = MPT3_CMD_NOT_USED;
5195         mutex_init(&ioc->config_cmds.mutex);
5196
5197         /* ctl module internal command bits */
5198         ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5199         ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5200         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
5201         mutex_init(&ioc->ctl_cmds.mutex);
5202
5203         if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
5204             !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
5205             !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
5206             !ioc->ctl_cmds.sense) {
5207                 r = -ENOMEM;
5208                 goto out_free_resources;
5209         }
5210
5211         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5212                 ioc->event_masks[i] = -1;
5213
5214         /* here we enable the events we care about */
5215         _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
5216         _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
5217         _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
5218         _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5219         _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
5220         _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
5221         _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
5222         _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
5223         _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
5224         _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
5225         _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
5226
5227         r = _base_make_ioc_operational(ioc, CAN_SLEEP);
5228         if (r)
5229                 goto out_free_resources;
5230
5231         ioc->non_operational_loop = 0;
5232         return 0;
5233
5234  out_free_resources:
5235
5236         ioc->remove_host = 1;
5237
5238         mpt3sas_base_free_resources(ioc);
5239         _base_release_memory_pools(ioc);
5240         pci_set_drvdata(ioc->pdev, NULL);
5241         kfree(ioc->cpu_msix_table);
5242         if (ioc->is_warpdrive)
5243                 kfree(ioc->reply_post_host_index);
5244         kfree(ioc->pd_handles);
5245         kfree(ioc->blocking_handles);
5246         kfree(ioc->tm_cmds.reply);
5247         kfree(ioc->transport_cmds.reply);
5248         kfree(ioc->scsih_cmds.reply);
5249         kfree(ioc->config_cmds.reply);
5250         kfree(ioc->base_cmds.reply);
5251         kfree(ioc->port_enable_cmds.reply);
5252         kfree(ioc->ctl_cmds.reply);
5253         kfree(ioc->ctl_cmds.sense);
5254         kfree(ioc->pfacts);
5255         ioc->ctl_cmds.reply = NULL;
5256         ioc->base_cmds.reply = NULL;
5257         ioc->tm_cmds.reply = NULL;
5258         ioc->scsih_cmds.reply = NULL;
5259         ioc->transport_cmds.reply = NULL;
5260         ioc->config_cmds.reply = NULL;
5261         ioc->pfacts = NULL;
5262         return r;
5263 }
5264
5265
5266 /**
5267  * mpt3sas_base_detach - remove controller instance
5268  * @ioc: per adapter object
5269  *
5270  * Return nothing.
5271  */
5272 void
5273 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
5274 {
5275         dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5276             __func__));
5277
5278         mpt3sas_base_stop_watchdog(ioc);
5279         mpt3sas_base_free_resources(ioc);
5280         _base_release_memory_pools(ioc);
5281         pci_set_drvdata(ioc->pdev, NULL);
5282         kfree(ioc->cpu_msix_table);
5283         if (ioc->is_warpdrive)
5284                 kfree(ioc->reply_post_host_index);
5285         kfree(ioc->pd_handles);
5286         kfree(ioc->blocking_handles);
5287         kfree(ioc->pfacts);
5288         kfree(ioc->ctl_cmds.reply);
5289         kfree(ioc->ctl_cmds.sense);
5290         kfree(ioc->base_cmds.reply);
5291         kfree(ioc->port_enable_cmds.reply);
5292         kfree(ioc->tm_cmds.reply);
5293         kfree(ioc->transport_cmds.reply);
5294         kfree(ioc->scsih_cmds.reply);
5295         kfree(ioc->config_cmds.reply);
5296 }
5297
5298 /**
5299  * _base_reset_handler - reset callback handler (for base)
5300  * @ioc: per adapter object
5301  * @reset_phase: phase
5302  *
5303  * The handler for doing any required cleanup or initialization.
5304  *
5305  * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
5306  * MPT3_IOC_DONE_RESET
5307  *
5308  * Return nothing.
5309  */
5310 static void
5311 _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
5312 {
5313         mpt3sas_scsih_reset_handler(ioc, reset_phase);
5314         mpt3sas_ctl_reset_handler(ioc, reset_phase);
5315         switch (reset_phase) {
5316         case MPT3_IOC_PRE_RESET:
5317                 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5318                 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
5319                 break;
5320         case MPT3_IOC_AFTER_RESET:
5321                 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5322                 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
5323                 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
5324                         ioc->transport_cmds.status |= MPT3_CMD_RESET;
5325                         mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
5326                         complete(&ioc->transport_cmds.done);
5327                 }
5328                 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
5329                         ioc->base_cmds.status |= MPT3_CMD_RESET;
5330                         mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
5331                         complete(&ioc->base_cmds.done);
5332                 }
5333                 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5334                         ioc->port_enable_failed = 1;
5335                         ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
5336                         mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
5337                         if (ioc->is_driver_loading) {
5338                                 ioc->start_scan_failed =
5339                                     MPI2_IOCSTATUS_INTERNAL_ERROR;
5340                                 ioc->start_scan = 0;
5341                                 ioc->port_enable_cmds.status =
5342                                     MPT3_CMD_NOT_USED;
5343                         } else
5344                                 complete(&ioc->port_enable_cmds.done);
5345                 }
5346                 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
5347                         ioc->config_cmds.status |= MPT3_CMD_RESET;
5348                         mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
5349                         ioc->config_cmds.smid = USHRT_MAX;
5350                         complete(&ioc->config_cmds.done);
5351                 }
5352                 break;
5353         case MPT3_IOC_DONE_RESET:
5354                 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5355                         "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
5356                 break;
5357         }
5358 }
5359
5360 /**
5361  * _wait_for_commands_to_complete - wait for pending SCSI I/O to complete
5362  * @ioc: Pointer to MPT_ADAPTER structure
5363  * @sleep_flag: CAN_SLEEP or NO_SLEEP
5364  *
5365  * This function waits (up to 10 seconds) for all pending SCSI commands to
5366  * complete prior to putting the controller in reset.
5367  */
5368 static void
5369 _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5370 {
5371         u32 ioc_state;
5372         unsigned long flags;
5373         u16 i;
5374
5375         ioc->pending_io_count = 0;
5376         if (sleep_flag != CAN_SLEEP)
5377                 return;
5378
5379         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5380         if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
5381                 return;
5382
5383         /* pending command count */
5384         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5385         for (i = 0; i < ioc->scsiio_depth; i++)
5386                 if (ioc->scsi_lookup[i].cb_idx != 0xFF)
5387                         ioc->pending_io_count++;
5388         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5389
5390         if (!ioc->pending_io_count)
5391                 return;
5392
5393         /* wait for pending commands to complete */
5394         wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
5395 }
5396
5397 /**
5398  * mpt3sas_base_hard_reset_handler - reset controller
5399  * @ioc: Pointer to MPT_ADAPTER structure
5400  * @sleep_flag: CAN_SLEEP or NO_SLEEP
5401  * @type: FORCE_BIG_HAMMER or SOFT_RESET
5402  *
5403  * Returns 0 for success, non-zero for failure.
5404  */
5405 int
5406 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5407         enum reset_type type)
5408 {
5409         int r;
5410         unsigned long flags;
5411         u32 ioc_state;
5412         u8 is_fault = 0, is_trigger = 0;
5413
5414         dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
5415             __func__));
5416
5417         if (ioc->pci_error_recovery) {
5418                 pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
5419                     ioc->name, __func__);
5420                 r = 0;
5421                 goto out_unlocked;
5422         }
5423
5424         if (mpt3sas_fwfault_debug)
5425                 mpt3sas_halt_firmware(ioc);
5426
5427         /* TODO - What we really should be doing is pulling
5428          * out all the code associated with NO_SLEEP; it's never used.
5429          * That is legacy code from the mpt fusion driver, ported over.
5430          * I will leave this BUG_ON here for now till it's been resolved.
5431          */
5432         BUG_ON(sleep_flag == NO_SLEEP);
5433
5434         /* wait for an active reset in progress to complete */
5435         if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
5436                 do {
5437                         ssleep(1);
5438                 } while (ioc->shost_recovery == 1);
5439                 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5440                     __func__));
5441                 return ioc->ioc_reset_in_progress_status;
5442         }
5443
5444         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5445         ioc->shost_recovery = 1;
5446         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5447
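             /* note whether a host trace diag buffer is registered and not yet
              * released, and whether the IOC is currently faulted, so that the
              * matching trigger can be fired once the reset completes (see the
              * out_unlocked path below)
              */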
5448         if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5449             MPT3_DIAG_BUFFER_IS_REGISTERED) &&
5450             (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5451             MPT3_DIAG_BUFFER_IS_RELEASED))) {
5452                 is_trigger = 1;
5453                 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5454                 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
5455                         is_fault = 1;
5456         }
5457         _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
5458         _wait_for_commands_to_complete(ioc, sleep_flag);
5459         _base_mask_interrupts(ioc);
5460         r = _base_make_ioc_ready(ioc, sleep_flag, type);
5461         if (r)
5462                 goto out;
5463         _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
5464
5465         /* If this hard reset is called while port enable is active, then
5466          * there is no reason to call make_ioc_operational
5467          */
5468         if (ioc->is_driver_loading && ioc->port_enable_failed) {
5469                 ioc->remove_host = 1;
5470                 r = -EFAULT;
5471                 goto out;
5472         }
5473         r = _base_get_ioc_facts(ioc, CAN_SLEEP);
5474         if (r)
5475                 goto out;
5476
5477         if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
5478                 panic("%s: Issue occurred with flashing controller firmware. "
5479                       "Please reboot the system and ensure that the correct"
5480                       " firmware version is running\n", ioc->name);
5481
5482         r = _base_make_ioc_operational(ioc, sleep_flag);
5483         if (!r)
5484                 _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
5485
5486  out:
5487         dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
5488             ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
5489
5490         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5491         ioc->ioc_reset_in_progress_status = r;
5492         ioc->shost_recovery = 0;
5493         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5494         ioc->ioc_reset_count++;
5495         mutex_unlock(&ioc->reset_in_progress_mutex);
5496
5497  out_unlocked:
5498         if ((r == 0) && is_trigger) {
5499                 if (is_fault)
5500                         mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
5501                 else
5502                         mpt3sas_trigger_master(ioc,
5503                             MASTER_TRIGGER_ADAPTER_RESET);
5504         }
5505         dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5506             __func__));
5507         return r;
5508 }