ipr: Fix locking for unit attention handling
drivers/scsi/ipr.c
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
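/* ipr_driver_lock serializes access to the global ipr_ioa_head adapter list */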
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

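/* Chip table: { PCI vendor, PCI device, interrupt type, SIS type, config access method, register layout } */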
static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

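/* Bus speed limits, indexed by the max_speed module parameter: 0 = 80 MB/s, 1 = U160, 2 = U320 */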
static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

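/*
 * Example module load (hypothetical values): cap the SCSI bus at U320 and
 * request two MSI-X vectors on capable adapters:
 *
 *      modprobe ipr max_speed=2 number_of_msix=2
 */
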
/*  A constant array of IOASCs/URCs/Error Messages */
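/* Fields per entry: IOASC, whether to log the IOASA, HCAM log level, error string */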
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "No ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

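/*
 * SES inquiry match table. An 'X' in the compare string means the
 * corresponding product id byte must match; any other byte (e.g. '*')
 * is treated as a wildcard. The last field is the max bus speed in MB/s.
 */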
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:               trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        trace_entry = &ioa_cfg->trace[atomic_add_return
                        (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
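        /* Ensure the trace entry is fully written before it can be observed */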
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

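        /* Preserve the HRRQ id assigned at allocation across the cmd_pkt memset */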
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue to allocate the command block from
 *
 * Return value:
 *      pointer to ipr command struct or NULL if the free queue is empty
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:                       done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

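        /* Drop the host lock while sleeping; ipr_internal_cmd_done signals
           the completion when the command finishes */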
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

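/**
 * ipr_get_hrrq_index - Select an HRR queue index for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Queue 0 (IPR_INIT_HRRQ) is reserved for initialization commands, so
 * when multiple queues exist, I/O is round-robined across indexes
 * 1..hrrq_num-1.
 *
 * Return value:
 *      HRR queue index
 **/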
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        if (ioa_cfg->hrrq_num == 1)
                return 0;
        else
                return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:               HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

1273 /**
1274  * ipr_format_res_path - Format the resource path for printing.
1275  * @ioa_cfg:    ioa config struct
1276  * @res_path:   resource path
1277  * @buf:        buffer
1278  * @len:        length of buffer provided
1279  *
1280  * Return value:
1281  *      pointer to buffer
1282  **/
1283 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1284                                  u8 *res_path, char *buffer, int len)
1285 {
1286         char *p = buffer;
1287
1288         *p = '\0';
1289         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1290         __ipr_format_res_path(res_path, p, len - (p - buffer));
1291         return buffer;
1292 }
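/*
 * Example with hypothetical values: for host number 2 and a resource
 * path of {0x00, 0x0e, 0xff, ...}, the formatted result is "2/00-0E".
 */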
1293
1294 /**
1295  * ipr_update_res_entry - Update the resource entry.
1296  * @res:        resource entry struct
1297  * @cfgtew:     config table entry wrapper struct
1298  *
1299  * Return value:
1300  *      none
1301  **/
1302 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1303                                  struct ipr_config_table_entry_wrapper *cfgtew)
1304 {
1305         char buffer[IPR_MAX_RES_PATH_LENGTH];
1306         unsigned int proto;
1307         int new_path = 0;
1308
1309         if (res->ioa_cfg->sis64) {
1310                 res->flags = cfgtew->u.cfgte64->flags;
1311                 res->res_flags = cfgtew->u.cfgte64->res_flags;
1312                 res->type = cfgtew->u.cfgte64->res_type;
1313
1314                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1315                         sizeof(struct ipr_std_inq_data));
1316
1317                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1318                 proto = cfgtew->u.cfgte64->proto;
1319                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1320                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1321
1322                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1323                         sizeof(res->dev_lun.scsi_lun));
1324
1325                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1326                                         sizeof(res->res_path))) {
1327                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1328                                 sizeof(res->res_path));
1329                         new_path = 1;
1330                 }
1331
1332                 if (res->sdev && new_path)
1333                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1334                                     ipr_format_res_path(res->ioa_cfg,
1335                                         res->res_path, buffer, sizeof(buffer)));
1336         } else {
1337                 res->flags = cfgtew->u.cfgte->flags;
1338                 if (res->flags & IPR_IS_IOA_RESOURCE)
1339                         res->type = IPR_RES_TYPE_IOAFP;
1340                 else
1341                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1342
1343                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1344                         sizeof(struct ipr_std_inq_data));
1345
1346                 res->qmodel = IPR_QUEUEING_MODEL(res);
1347                 proto = cfgtew->u.cfgte->proto;
1348                 res->res_handle = cfgtew->u.cfgte->res_handle;
1349         }
1350
1351         ipr_update_ata_class(res, proto);
1352 }
1353
1354 /**
1355  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1356  *                        for the resource.
1357  * @res:        resource entry struct
1359  *
1360  * Return value:
1361  *      none
1362  **/
1363 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1364 {
1365         struct ipr_resource_entry *gscsi_res = NULL;
1366         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1367
1368         if (!ioa_cfg->sis64)
1369                 return;
1370
1371         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1372                 clear_bit(res->target, ioa_cfg->array_ids);
1373         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1374                 clear_bit(res->target, ioa_cfg->vset_ids);
1375         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
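                /*
                 * Several generic SCSI resources (e.g. multiple LUNs) can
                 * share one device ID; only release the target bit once no
                 * other resource still references it.
                 */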
1376                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1377                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1378                                 return;
1379                 clear_bit(res->target, ioa_cfg->target_ids);
1380
1381         } else if (res->bus == 0)
1382                 clear_bit(res->target, ioa_cfg->target_ids);
1383 }
1384
1385 /**
1386  * ipr_handle_config_change - Handle a config change from the adapter
1387  * @ioa_cfg:    ioa config struct
1388  * @hostrcb:    hostrcb
1389  *
1390  * Return value:
1391  *      none
1392  **/
1393 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1394                                      struct ipr_hostrcb *hostrcb)
1395 {
1396         struct ipr_resource_entry *res = NULL;
1397         struct ipr_config_table_entry_wrapper cfgtew;
1398         __be32 cc_res_handle;
1399
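        /* Assume a new device notification until the handle is found below */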
1400         u32 is_ndn = 1;
1401
1402         if (ioa_cfg->sis64) {
1403                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1404                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1405         } else {
1406                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1407                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1408         }
1409
1410         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1411                 if (res->res_handle == cc_res_handle) {
1412                         is_ndn = 0;
1413                         break;
1414                 }
1415         }
1416
1417         if (is_ndn) {
1418                 if (list_empty(&ioa_cfg->free_res_q)) {
1419                         ipr_send_hcam(ioa_cfg,
1420                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1421                                       hostrcb);
1422                         return;
1423                 }
1424
1425                 res = list_entry(ioa_cfg->free_res_q.next,
1426                                  struct ipr_resource_entry, queue);
1427
1428                 list_del(&res->queue);
1429                 ipr_init_res_entry(res, &cfgtew);
1430                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1431         }
1432
1433         ipr_update_res_entry(res, &cfgtew);
1434
1435         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
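                /*
                 * For a removal: if the mid-layer knows about the device,
                 * flag it for removal by the worker thread; otherwise the
                 * resource entry can be released immediately.
                 */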
1436                 if (res->sdev) {
1437                         res->del_from_ml = 1;
1438                         res->res_handle = IPR_INVALID_RES_HANDLE;
1439                         schedule_work(&ioa_cfg->work_q);
1440                 } else {
1441                         ipr_clear_res_target(res);
1442                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1443                 }
1444         } else if (!res->sdev || res->del_from_ml) {
1445                 res->add_to_ml = 1;
1446                 schedule_work(&ioa_cfg->work_q);
1447         }
1448
1449         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1450 }
1451
1452 /**
1453  * ipr_process_ccn - Op done function for a CCN.
1454  * @ipr_cmd:    ipr command struct
1455  *
1456  * This function is the op done function for a configuration
1457  * change notification host controlled async from the adapter.
1458  *
1459  * Return value:
1460  *      none
1461  **/
1462 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1463 {
1464         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1465         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1466         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1467
1468         list_del(&hostrcb->queue);
1469         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1470
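        /*
         * An IOA reset or a host-initiated abort is expected to fail
         * outstanding HCAMs; anything else is worth reporting before the
         * HCAM is re-issued.
         */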
1471         if (ioasc) {
1472                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1473                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1474                         dev_err(&ioa_cfg->pdev->dev,
1475                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1476
1477                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1478         } else {
1479                 ipr_handle_config_change(ioa_cfg, hostrcb);
1480         }
1481 }
1482
1483 /**
1484  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1485  * @i:          index into buffer
1486  * @buf:                string to modify
1487  *
1488  * This function will strip all trailing whitespace, pad the end
1489  * of the string with a single space, and NULL terminate the string.
1490  *
1491  * Return value:
1492  *      new length of string
1493  **/
1494 static int strip_and_pad_whitespace(int i, char *buf)
1495 {
1496         while (i && buf[i] == ' ')
1497                 i--;
1498         buf[i+1] = ' ';
1499         buf[i+2] = '\0';
1500         return i + 2;
1501 }
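/*
 * Example with a hypothetical 8-byte field: "IBM     " becomes "IBM " and
 * the returned index points at the terminating NUL, so the next field can
 * be copied in directly at that offset.
 */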
1502
1503 /**
1504  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1505  * @prefix:             string to print at start of printk
1506  * @hostrcb:    hostrcb pointer
1507  * @vpd:                vendor/product id/sn struct
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1513                                 struct ipr_vpd *vpd)
1514 {
1515         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1516         int i = 0;
1517
1518         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1519         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1520
1521         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1522         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1523
1524         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1525         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1526
1527         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1528 }
1529
1530 /**
1531  * ipr_log_vpd - Log the passed VPD to the error log.
1532  * @vpd:                vendor/product id/sn struct
1533  *
1534  * Return value:
1535  *      none
1536  **/
1537 static void ipr_log_vpd(struct ipr_vpd *vpd)
1538 {
1539         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1540                     + IPR_SERIAL_NUM_LEN];
1541
1542         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1543         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1544                IPR_PROD_ID_LEN);
1545         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1546         ipr_err("Vendor/Product ID: %s\n", buffer);
1547
1548         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1549         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1550         ipr_err("    Serial Number: %s\n", buffer);
1551 }
1552
1553 /**
1554  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1555  * @prefix:             string to print at start of printk
1556  * @hostrcb:    hostrcb pointer
1557  * @vpd:                vendor/product id/sn/wwn struct
1558  *
1559  * Return value:
1560  *      none
1561  **/
1562 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563                                     struct ipr_ext_vpd *vpd)
1564 {
1565         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1566         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1567                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1568 }
1569
1570 /**
1571  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1572  * @vpd:                vendor/product id/sn/wwn struct
1573  *
1574  * Return value:
1575  *      none
1576  **/
1577 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1578 {
1579         ipr_log_vpd(&vpd->vpd);
1580         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1581                 be32_to_cpu(vpd->wwid[1]));
1582 }
1583
1584 /**
1585  * ipr_log_enhanced_cache_error - Log a cache error.
1586  * @ioa_cfg:    ioa config struct
1587  * @hostrcb:    hostrcb struct
1588  *
1589  * Return value:
1590  *      none
1591  **/
1592 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1593                                          struct ipr_hostrcb *hostrcb)
1594 {
1595         struct ipr_hostrcb_type_12_error *error;
1596
1597         if (ioa_cfg->sis64)
1598                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1599         else
1600                 error = &hostrcb->hcam.u.error.u.type_12_error;
1601
1602         ipr_err("-----Current Configuration-----\n");
1603         ipr_err("Cache Directory Card Information:\n");
1604         ipr_log_ext_vpd(&error->ioa_vpd);
1605         ipr_err("Adapter Card Information:\n");
1606         ipr_log_ext_vpd(&error->cfc_vpd);
1607
1608         ipr_err("-----Expected Configuration-----\n");
1609         ipr_err("Cache Directory Card Information:\n");
1610         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1611         ipr_err("Adapter Card Information:\n");
1612         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1613
1614         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1615                      be32_to_cpu(error->ioa_data[0]),
1616                      be32_to_cpu(error->ioa_data[1]),
1617                      be32_to_cpu(error->ioa_data[2]));
1618 }
1619
1620 /**
1621  * ipr_log_cache_error - Log a cache error.
1622  * @ioa_cfg:    ioa config struct
1623  * @hostrcb:    hostrcb struct
1624  *
1625  * Return value:
1626  *      none
1627  **/
1628 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1629                                 struct ipr_hostrcb *hostrcb)
1630 {
1631         struct ipr_hostrcb_type_02_error *error =
1632                 &hostrcb->hcam.u.error.u.type_02_error;
1633
1634         ipr_err("-----Current Configuration-----\n");
1635         ipr_err("Cache Directory Card Information:\n");
1636         ipr_log_vpd(&error->ioa_vpd);
1637         ipr_err("Adapter Card Information:\n");
1638         ipr_log_vpd(&error->cfc_vpd);
1639
1640         ipr_err("-----Expected Configuration-----\n");
1641         ipr_err("Cache Directory Card Information:\n");
1642         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1643         ipr_err("Adapter Card Information:\n");
1644         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1645
1646         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1647                      be32_to_cpu(error->ioa_data[0]),
1648                      be32_to_cpu(error->ioa_data[1]),
1649                      be32_to_cpu(error->ioa_data[2]));
1650 }
1651
1652 /**
1653  * ipr_log_enhanced_config_error - Log a configuration error.
1654  * @ioa_cfg:    ioa config struct
1655  * @hostrcb:    hostrcb struct
1656  *
1657  * Return value:
1658  *      none
1659  **/
1660 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1661                                           struct ipr_hostrcb *hostrcb)
1662 {
1663         int errors_logged, i;
1664         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1665         struct ipr_hostrcb_type_13_error *error;
1666
1667         error = &hostrcb->hcam.u.error.u.type_13_error;
1668         errors_logged = be32_to_cpu(error->errors_logged);
1669
1670         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1671                 be32_to_cpu(error->errors_detected), errors_logged);
1672
1673         dev_entry = error->dev;
1674
1675         for (i = 0; i < errors_logged; i++, dev_entry++) {
1676                 ipr_err_separator;
1677
1678                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1679                 ipr_log_ext_vpd(&dev_entry->vpd);
1680
1681                 ipr_err("-----New Device Information-----\n");
1682                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1683
1684                 ipr_err("Cache Directory Card Information:\n");
1685                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1686
1687                 ipr_err("Adapter Card Information:\n");
1688                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1689         }
1690 }
1691
1692 /**
1693  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1694  * @ioa_cfg:    ioa config struct
1695  * @hostrcb:    hostrcb struct
1696  *
1697  * Return value:
1698  *      none
1699  **/
1700 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1701                                        struct ipr_hostrcb *hostrcb)
1702 {
1703         int errors_logged, i;
1704         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1705         struct ipr_hostrcb_type_23_error *error;
1706         char buffer[IPR_MAX_RES_PATH_LENGTH];
1707
1708         error = &hostrcb->hcam.u.error64.u.type_23_error;
1709         errors_logged = be32_to_cpu(error->errors_logged);
1710
1711         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1712                 be32_to_cpu(error->errors_detected), errors_logged);
1713
1714         dev_entry = error->dev;
1715
1716         for (i = 0; i < errors_logged; i++, dev_entry++) {
1717                 ipr_err_separator;
1718
1719                 ipr_err("Device %d : %s\n", i + 1,
1720                         __ipr_format_res_path(dev_entry->res_path,
1721                                               buffer, sizeof(buffer)));
1722                 ipr_log_ext_vpd(&dev_entry->vpd);
1723
1724                 ipr_err("-----New Device Information-----\n");
1725                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1726
1727                 ipr_err("Cache Directory Card Information:\n");
1728                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1729
1730                 ipr_err("Adapter Card Information:\n");
1731                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1732         }
1733 }
1734
1735 /**
1736  * ipr_log_config_error - Log a configuration error.
1737  * @ioa_cfg:    ioa config struct
1738  * @hostrcb:    hostrcb struct
1739  *
1740  * Return value:
1741  *      none
1742  **/
1743 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1744                                  struct ipr_hostrcb *hostrcb)
1745 {
1746         int errors_logged, i;
1747         struct ipr_hostrcb_device_data_entry *dev_entry;
1748         struct ipr_hostrcb_type_03_error *error;
1749
1750         error = &hostrcb->hcam.u.error.u.type_03_error;
1751         errors_logged = be32_to_cpu(error->errors_logged);
1752
1753         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1754                 be32_to_cpu(error->errors_detected), errors_logged);
1755
1756         dev_entry = error->dev;
1757
1758         for (i = 0; i < errors_logged; i++, dev_entry++) {
1759                 ipr_err_separator;
1760
1761                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1762                 ipr_log_vpd(&dev_entry->vpd);
1763
1764                 ipr_err("-----New Device Information-----\n");
1765                 ipr_log_vpd(&dev_entry->new_vpd);
1766
1767                 ipr_err("Cache Directory Card Information:\n");
1768                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1769
1770                 ipr_err("Adapter Card Information:\n");
1771                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1772
1773                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1774                         be32_to_cpu(dev_entry->ioa_data[0]),
1775                         be32_to_cpu(dev_entry->ioa_data[1]),
1776                         be32_to_cpu(dev_entry->ioa_data[2]),
1777                         be32_to_cpu(dev_entry->ioa_data[3]),
1778                         be32_to_cpu(dev_entry->ioa_data[4]));
1779         }
1780 }
1781
1782 /**
1783  * ipr_log_enhanced_array_error - Log an array configuration error.
1784  * @ioa_cfg:    ioa config struct
1785  * @hostrcb:    hostrcb struct
1786  *
1787  * Return value:
1788  *      none
1789  **/
1790 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1791                                          struct ipr_hostrcb *hostrcb)
1792 {
1793         int i, num_entries;
1794         struct ipr_hostrcb_type_14_error *error;
1795         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1796         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1797
1798         error = &hostrcb->hcam.u.error.u.type_14_error;
1799
1800         ipr_err_separator;
1801
1802         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1803                 error->protection_level,
1804                 ioa_cfg->host->host_no,
1805                 error->last_func_vset_res_addr.bus,
1806                 error->last_func_vset_res_addr.target,
1807                 error->last_func_vset_res_addr.lun);
1808
1809         ipr_err_separator;
1810
1811         array_entry = error->array_member;
1812         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1813                             ARRAY_SIZE(error->array_member));
1814
1815         for (i = 0; i < num_entries; i++, array_entry++) {
1816                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1817                         continue;
1818
1819                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1820                         ipr_err("Exposed Array Member %d:\n", i);
1821                 else
1822                         ipr_err("Array Member %d:\n", i);
1823
1824                 ipr_log_ext_vpd(&array_entry->vpd);
1825                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1826                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1827                                  "Expected Location");
1828
1829                 ipr_err_separator;
1830         }
1831 }
1832
1833 /**
1834  * ipr_log_array_error - Log an array configuration error.
1835  * @ioa_cfg:    ioa config struct
1836  * @hostrcb:    hostrcb struct
1837  *
1838  * Return value:
1839  *      none
1840  **/
1841 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842                                 struct ipr_hostrcb *hostrcb)
1843 {
1844         int i;
1845         struct ipr_hostrcb_type_04_error *error;
1846         struct ipr_hostrcb_array_data_entry *array_entry;
1847         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849         error = &hostrcb->hcam.u.error.u.type_04_error;
1850
1851         ipr_err_separator;
1852
1853         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854                 error->protection_level,
1855                 ioa_cfg->host->host_no,
1856                 error->last_func_vset_res_addr.bus,
1857                 error->last_func_vset_res_addr.target,
1858                 error->last_func_vset_res_addr.lun);
1859
1860         ipr_err_separator;
1861
1862         array_entry = error->array_member;
1863
1864         for (i = 0; i < 18; i++) {
1865                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1866                         continue;
1867
1868                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1869                         ipr_err("Exposed Array Member %d:\n", i);
1870                 else
1871                         ipr_err("Array Member %d:\n", i);
1872
1873                 ipr_log_vpd(&array_entry->vpd);
1874
1875                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877                                  "Expected Location");
1878
1879                 ipr_err_separator;
1880
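                /*
                 * The 18 entries are split across two fixed arrays in the
                 * hostrcb: entries 0-9 in array_member, 10-17 in
                 * array_member2.
                 */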
1881                 if (i == 9)
1882                         array_entry = error->array_member2;
1883                 else
1884                         array_entry++;
1885         }
1886 }
1887
1888 /**
1889  * ipr_log_hex_data - Log additional hex IOA error data.
1890  * @ioa_cfg:    ioa config struct
1891  * @data:               IOA error data
1892  * @len:                data length
1893  *
1894  * Return value:
1895  *      none
1896  **/
1897 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1898 {
1899         int i;
1900
1901         if (len == 0)
1902                 return;
1903
1904         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1905                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1906
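        /*
         * len is in bytes; dump four big-endian 32-bit words per line,
         * prefixed with the byte offset of the first word on the line.
         */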
1907         for (i = 0; i < len / 4; i += 4) {
1908                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1909                         be32_to_cpu(data[i]),
1910                         be32_to_cpu(data[i+1]),
1911                         be32_to_cpu(data[i+2]),
1912                         be32_to_cpu(data[i+3]));
1913         }
1914 }
1915
1916 /**
1917  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1918  * @ioa_cfg:    ioa config struct
1919  * @hostrcb:    hostrcb struct
1920  *
1921  * Return value:
1922  *      none
1923  **/
1924 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1925                                             struct ipr_hostrcb *hostrcb)
1926 {
1927         struct ipr_hostrcb_type_17_error *error;
1928
1929         if (ioa_cfg->sis64)
1930                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1931         else
1932                 error = &hostrcb->hcam.u.error.u.type_17_error;
1933
1934         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1935         strim(error->failure_reason);
1936
1937         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1938                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1939         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1940         ipr_log_hex_data(ioa_cfg, error->data,
1941                          be32_to_cpu(hostrcb->hcam.length) -
1942                          (offsetof(struct ipr_hostrcb_error, u) +
1943                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1944 }
1945
1946 /**
1947  * ipr_log_dual_ioa_error - Log a dual adapter error.
1948  * @ioa_cfg:    ioa config struct
1949  * @hostrcb:    hostrcb struct
1950  *
1951  * Return value:
1952  *      none
1953  **/
1954 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1955                                    struct ipr_hostrcb *hostrcb)
1956 {
1957         struct ipr_hostrcb_type_07_error *error;
1958
1959         error = &hostrcb->hcam.u.error.u.type_07_error;
1960         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1961         strim(error->failure_reason);
1962
1963         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1964                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1965         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1966         ipr_log_hex_data(ioa_cfg, error->data,
1967                          be32_to_cpu(hostrcb->hcam.length) -
1968                          (offsetof(struct ipr_hostrcb_error, u) +
1969                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1970 }
1971
1972 static const struct {
1973         u8 active;
1974         char *desc;
1975 } path_active_desc[] = {
1976         { IPR_PATH_NO_INFO, "Path" },
1977         { IPR_PATH_ACTIVE, "Active path" },
1978         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1979 };
1980
1981 static const struct {
1982         u8 state;
1983         char *desc;
1984 } path_state_desc[] = {
1985         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1986         { IPR_PATH_HEALTHY, "is healthy" },
1987         { IPR_PATH_DEGRADED, "is degraded" },
1988         { IPR_PATH_FAILED, "is failed" }
1989 };
1990
1991 /**
1992  * ipr_log_fabric_path - Log a fabric path error
1993  * @hostrcb:    hostrcb struct
1994  * @fabric:             fabric descriptor
1995  *
1996  * Return value:
1997  *      none
1998  **/
1999 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2000                                 struct ipr_hostrcb_fabric_desc *fabric)
2001 {
2002         int i, j;
2003         u8 path_state = fabric->path_state;
2004         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2005         u8 state = path_state & IPR_PATH_STATE_MASK;
2006
2007         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2008                 if (path_active_desc[i].active != active)
2009                         continue;
2010
2011                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2012                         if (path_state_desc[j].state != state)
2013                                 continue;
2014
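                        /* A value of 0xff marks the field as not applicable */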
2015                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2016                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2017                                              path_active_desc[i].desc, path_state_desc[j].desc,
2018                                              fabric->ioa_port);
2019                         } else if (fabric->cascaded_expander == 0xff) {
2020                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2021                                              path_active_desc[i].desc, path_state_desc[j].desc,
2022                                              fabric->ioa_port, fabric->phy);
2023                         } else if (fabric->phy == 0xff) {
2024                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2025                                              path_active_desc[i].desc, path_state_desc[j].desc,
2026                                              fabric->ioa_port, fabric->cascaded_expander);
2027                         } else {
2028                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2029                                              path_active_desc[i].desc, path_state_desc[j].desc,
2030                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2031                         }
2032                         return;
2033                 }
2034         }
2035
2036         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2037                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2038 }
2039
2040 /**
2041  * ipr_log64_fabric_path - Log a fabric path error
2042  * @hostrcb:    hostrcb struct
2043  * @fabric:             fabric descriptor
2044  *
2045  * Return value:
2046  *      none
2047  **/
2048 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2049                                   struct ipr_hostrcb64_fabric_desc *fabric)
2050 {
2051         int i, j;
2052         u8 path_state = fabric->path_state;
2053         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2054         u8 state = path_state & IPR_PATH_STATE_MASK;
2055         char buffer[IPR_MAX_RES_PATH_LENGTH];
2056
2057         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058                 if (path_active_desc[i].active != active)
2059                         continue;
2060
2061                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062                         if (path_state_desc[j].state != state)
2063                                 continue;
2064
2065                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2066                                      path_active_desc[i].desc, path_state_desc[j].desc,
2067                                      ipr_format_res_path(hostrcb->ioa_cfg,
2068                                                 fabric->res_path,
2069                                                 buffer, sizeof(buffer)));
2070                         return;
2071                 }
2072         }
2073
2074         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2075                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2076                                     buffer, sizeof(buffer)));
2077 }
2078
2079 static const struct {
2080         u8 type;
2081         char *desc;
2082 } path_type_desc[] = {
2083         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2084         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2085         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2086         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2087 };
2088
2089 static const struct {
2090         u8 status;
2091         char *desc;
2092 } path_status_desc[] = {
2093         { IPR_PATH_CFG_NO_PROB, "Functional" },
2094         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2095         { IPR_PATH_CFG_FAILED, "Failed" },
2096         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2097         { IPR_PATH_NOT_DETECTED, "Missing" },
2098         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2099 };
2100
2101 static const char *link_rate[] = {
2102         "unknown",
2103         "disabled",
2104         "phy reset problem",
2105         "spinup hold",
2106         "port selector",
2107         "unknown",
2108         "unknown",
2109         "unknown",
2110         "1.5Gbps",
2111         "3.0Gbps",
2112         "unknown",
2113         "unknown",
2114         "unknown",
2115         "unknown",
2116         "unknown",
2117         "unknown"
2118 };
2119
2120 /**
2121  * ipr_log_path_elem - Log a fabric path element.
2122  * @hostrcb:    hostrcb struct
2123  * @cfg:                fabric path element struct
2124  *
2125  * Return value:
2126  *      none
2127  **/
2128 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2129                               struct ipr_hostrcb_config_element *cfg)
2130 {
2131         int i, j;
2132         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2133         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2134
2135         if (type == IPR_PATH_CFG_NOT_EXIST)
2136                 return;
2137
2138         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2139                 if (path_type_desc[i].type != type)
2140                         continue;
2141
2142                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2143                         if (path_status_desc[j].status != status)
2144                                 continue;
2145
2146                         if (type == IPR_PATH_CFG_IOA_PORT) {
2147                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2148                                              path_status_desc[j].desc, path_type_desc[i].desc,
2149                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2150                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2151                         } else {
2152                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2153                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2154                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2155                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2156                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2157                                 } else if (cfg->cascaded_expander == 0xff) {
2158                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2159                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2160                                                      path_type_desc[i].desc, cfg->phy,
2161                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2162                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2163                                 } else if (cfg->phy == 0xff) {
2164                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2165                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2166                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2167                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2168                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2169                                 } else {
2170                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2171                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2172                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2173                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2174                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2175                                 }
2176                         }
2177                         return;
2178                 }
2179         }
2180
2181         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2182                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2183                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2184                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2185 }
2186
2187 /**
2188  * ipr_log64_path_elem - Log a fabric path element.
2189  * @hostrcb:    hostrcb struct
2190  * @cfg:                fabric path element struct
2191  *
2192  * Return value:
2193  *      none
2194  **/
2195 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2196                                 struct ipr_hostrcb64_config_element *cfg)
2197 {
2198         int i, j;
2199         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2200         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2201         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2202         char buffer[IPR_MAX_RES_PATH_LENGTH];
2203
2204         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2205                 return;
2206
2207         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2208                 if (path_type_desc[i].type != type)
2209                         continue;
2210
2211                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2212                         if (path_status_desc[j].status != status)
2213                                 continue;
2214
2215                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2216                                      path_status_desc[j].desc, path_type_desc[i].desc,
2217                                      ipr_format_res_path(hostrcb->ioa_cfg,
2218                                         cfg->res_path, buffer, sizeof(buffer)),
2219                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2220                                         be32_to_cpu(cfg->wwid[0]),
2221                                         be32_to_cpu(cfg->wwid[1]));
2222                         return;
2223                 }
2224         }
2225         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2226                      "WWN=%08X%08X\n", cfg->type_status,
2227                      ipr_format_res_path(hostrcb->ioa_cfg,
2228                         cfg->res_path, buffer, sizeof(buffer)),
2229                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2230                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2231 }
2232
2233 /**
2234  * ipr_log_fabric_error - Log a fabric error.
2235  * @ioa_cfg:    ioa config struct
2236  * @hostrcb:    hostrcb struct
2237  *
2238  * Return value:
2239  *      none
2240  **/
2241 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2242                                  struct ipr_hostrcb *hostrcb)
2243 {
2244         struct ipr_hostrcb_type_20_error *error;
2245         struct ipr_hostrcb_fabric_desc *fabric;
2246         struct ipr_hostrcb_config_element *cfg;
2247         int i, add_len;
2248
2249         error = &hostrcb->hcam.u.error.u.type_20_error;
2250         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2251         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2252
2253         add_len = be32_to_cpu(hostrcb->hcam.length) -
2254                 (offsetof(struct ipr_hostrcb_error, u) +
2255                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2256
2257         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2258                 ipr_log_fabric_path(hostrcb, fabric);
2259                 for_each_fabric_cfg(fabric, cfg)
2260                         ipr_log_path_elem(hostrcb, cfg);
2261
2262                 add_len -= be16_to_cpu(fabric->length);
2263                 fabric = (struct ipr_hostrcb_fabric_desc *)
2264                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2265         }
2266
2267         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2268 }
2269
2270 /**
2271  * ipr_log_sis64_array_error - Log a sis64 array error.
2272  * @ioa_cfg:    ioa config struct
2273  * @hostrcb:    hostrcb struct
2274  *
2275  * Return value:
2276  *      none
2277  **/
2278 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2279                                       struct ipr_hostrcb *hostrcb)
2280 {
2281         int i, num_entries;
2282         struct ipr_hostrcb_type_24_error *error;
2283         struct ipr_hostrcb64_array_data_entry *array_entry;
2284         char buffer[IPR_MAX_RES_PATH_LENGTH];
2285         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2286
2287         error = &hostrcb->hcam.u.error64.u.type_24_error;
2288
2289         ipr_err_separator;
2290
2291         ipr_err("RAID %s Array Configuration: %s\n",
2292                 error->protection_level,
2293                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2294                         buffer, sizeof(buffer)));
2295
2296         ipr_err_separator;
2297
2298         array_entry = error->array_member;
2299         num_entries = min_t(u32, error->num_entries,
2300                             ARRAY_SIZE(error->array_member));
2301
2302         for (i = 0; i < num_entries; i++, array_entry++) {
2303
2304                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2305                         continue;
2306
2307                 if (error->exposed_mode_adn == i)
2308                         ipr_err("Exposed Array Member %d:\n", i);
2309                 else
2310                         ipr_err("Array Member %d:\n", i);
2311
2313                 ipr_log_ext_vpd(&array_entry->vpd);
2314                 ipr_err("Current Location: %s\n",
2315                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2316                                 buffer, sizeof(buffer)));
2317                 ipr_err("Expected Location: %s\n",
2318                          ipr_format_res_path(ioa_cfg,
2319                                 array_entry->expected_res_path,
2320                                 buffer, sizeof(buffer)));
2321
2322                 ipr_err_separator;
2323         }
2324 }
2325
2326 /**
2327  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2328  * @ioa_cfg:    ioa config struct
2329  * @hostrcb:    hostrcb struct
2330  *
2331  * Return value:
2332  *      none
2333  **/
2334 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2335                                        struct ipr_hostrcb *hostrcb)
2336 {
2337         struct ipr_hostrcb_type_30_error *error;
2338         struct ipr_hostrcb64_fabric_desc *fabric;
2339         struct ipr_hostrcb64_config_element *cfg;
2340         int i, add_len;
2341
2342         error = &hostrcb->hcam.u.error64.u.type_30_error;
2343
2344         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2345         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2346
2347         add_len = be32_to_cpu(hostrcb->hcam.length) -
2348                 (offsetof(struct ipr_hostrcb64_error, u) +
2349                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2350
2351         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2352                 ipr_log64_fabric_path(hostrcb, fabric);
2353                 for_each_fabric_cfg(fabric, cfg)
2354                         ipr_log64_path_elem(hostrcb, cfg);
2355
2356                 add_len -= be16_to_cpu(fabric->length);
2357                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2358                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2359         }
2360
2361         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2362 }
2363
2364 /**
2365  * ipr_log_generic_error - Log an adapter error.
2366  * @ioa_cfg:    ioa config struct
2367  * @hostrcb:    hostrcb struct
2368  *
2369  * Return value:
2370  *      none
2371  **/
2372 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2373                                   struct ipr_hostrcb *hostrcb)
2374 {
2375         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2376                          be32_to_cpu(hostrcb->hcam.length));
2377 }
2378
2379 /**
2380  * ipr_log_sis64_device_error - Log a sis64 device error.
2381  * @ioa_cfg:    ioa config struct
2382  * @hostrcb:    hostrcb struct
2383  *
2384  * Return value:
2385  *      none
2386  **/
2387 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2388                                          struct ipr_hostrcb *hostrcb)
2389 {
2390         struct ipr_hostrcb_type_21_error *error;
2391         char buffer[IPR_MAX_RES_PATH_LENGTH];
2392
2393         error = &hostrcb->hcam.u.error64.u.type_21_error;
2394
2395         ipr_err("-----Failing Device Information-----\n");
2396         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2397                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2398                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2399         ipr_err("Device Resource Path: %s\n",
2400                 __ipr_format_res_path(error->res_path,
2401                                       buffer, sizeof(buffer)));
2402         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2403         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2404         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2405         ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2406         ipr_err("SCSI Sense Data:\n");
2407         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2408         ipr_err("SCSI Command Descriptor Block:\n");
2409         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2410
2411         ipr_err("Additional IOA Data:\n");
2412         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2413 }
2414
2415 /**
2416  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2417  * @ioasc:      IOASC
2418  *
2419  * This function will return the index into the ipr_error_table
2420  * for the specified IOASC. If the IOASC is not in the table,
2421  * 0 will be returned, which points to the entry used for unknown errors.
2422  *
2423  * Return value:
2424  *      index into the ipr_error_table
2425  **/
2426 static u32 ipr_get_error(u32 ioasc)
2427 {
2428         int i;
2429
2430         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2431                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2432                         return i;
2433
2434         return 0;
2435 }
2436
2437 /**
2438  * ipr_handle_log_data - Log an adapter error.
2439  * @ioa_cfg:    ioa config struct
2440  * @hostrcb:    hostrcb struct
2441  *
2442  * This function logs an adapter error to the system.
2443  *
2444  * Return value:
2445  *      none
2446  **/
2447 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2448                                 struct ipr_hostrcb *hostrcb)
2449 {
2450         u32 ioasc;
2451         int error_index;
2452         struct ipr_hostrcb_type_21_error *error;
2453
2454         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2455                 return;
2456
2457         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2458                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2459
2460         if (ioa_cfg->sis64)
2461                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2462         else
2463                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2464
2465         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2466             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2467                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2468                 scsi_report_bus_reset(ioa_cfg->host,
2469                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2470         }
2471
2472         error_index = ipr_get_error(ioasc);
2473
2474         if (!ipr_error_table[error_index].log_hcam)
2475                 return;
2476
2477         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2478             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2479                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2480
2481                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2482                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2483                         return;
2484         }
2485
2486         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2487
2488         /* Set indication we have logged an error */
2489         ioa_cfg->errors_logged++;
2490
2491         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2492                 return;
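        /* Clamp the length so the raw dump below cannot run past the hostrcb */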
2493         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2494                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2495
2496         switch (hostrcb->hcam.overlay_id) {
2497         case IPR_HOST_RCB_OVERLAY_ID_2:
2498                 ipr_log_cache_error(ioa_cfg, hostrcb);
2499                 break;
2500         case IPR_HOST_RCB_OVERLAY_ID_3:
2501                 ipr_log_config_error(ioa_cfg, hostrcb);
2502                 break;
2503         case IPR_HOST_RCB_OVERLAY_ID_4:
2504         case IPR_HOST_RCB_OVERLAY_ID_6:
2505                 ipr_log_array_error(ioa_cfg, hostrcb);
2506                 break;
2507         case IPR_HOST_RCB_OVERLAY_ID_7:
2508                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2509                 break;
2510         case IPR_HOST_RCB_OVERLAY_ID_12:
2511                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2512                 break;
2513         case IPR_HOST_RCB_OVERLAY_ID_13:
2514                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2515                 break;
2516         case IPR_HOST_RCB_OVERLAY_ID_14:
2517         case IPR_HOST_RCB_OVERLAY_ID_16:
2518                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2519                 break;
2520         case IPR_HOST_RCB_OVERLAY_ID_17:
2521                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2522                 break;
2523         case IPR_HOST_RCB_OVERLAY_ID_20:
2524                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2525                 break;
2526         case IPR_HOST_RCB_OVERLAY_ID_21:
2527                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2528                 break;
2529         case IPR_HOST_RCB_OVERLAY_ID_23:
2530                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2531                 break;
2532         case IPR_HOST_RCB_OVERLAY_ID_24:
2533         case IPR_HOST_RCB_OVERLAY_ID_26:
2534                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2535                 break;
2536         case IPR_HOST_RCB_OVERLAY_ID_30:
2537                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2538                 break;
2539         case IPR_HOST_RCB_OVERLAY_ID_1:
2540         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2541         default:
2542                 ipr_log_generic_error(ioa_cfg, hostrcb);
2543                 break;
2544         }
2545 }
2546
2547 /**
2548  * ipr_process_error - Op done function for an adapter error log.
2549  * @ipr_cmd:    ipr command struct
2550  *
2551  * This function is the op done function for an error log host
2552  * controlled async from the adapter. It will log the error and
2553  * send the HCAM back to the adapter.
2554  *
2555  * Return value:
2556  *      none
2557  **/
2558 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2559 {
2560         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2561         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2562         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2563         u32 fd_ioasc;
2564
2565         if (ioa_cfg->sis64)
2566                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2567         else
2568                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2569
2570         list_del(&hostrcb->queue);
2571         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2572
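        /*
         * IOA resets and host-initiated aborts are expected to terminate
         * outstanding HCAMs, so only other IOASCs are reported as errors.
         */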
2573         if (!ioasc) {
2574                 ipr_handle_log_data(ioa_cfg, hostrcb);
2575                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2576                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2577         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2578                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2579                 dev_err(&ioa_cfg->pdev->dev,
2580                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2581         }
2582
2583         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2584 }
2585
2586 /**
2587  * ipr_timeout -  An internally generated op has timed out.
2588  * @ipr_cmd:    ipr command struct
2589  *
2590  * This function blocks host requests and initiates an
2591  * adapter reset.
2592  *
2593  * Return value:
2594  *      none
2595  **/
2596 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2597 {
2598         unsigned long lock_flags = 0;
2599         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2600
2601         ENTER;
2602         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2603
2604         ioa_cfg->errors_logged++;
2605         dev_err(&ioa_cfg->pdev->dev,
2606                 "Adapter being reset due to command timeout.\n");
2607
2608         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2609                 ioa_cfg->sdt_state = GET_DUMP;
2610
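        /*
         * Only initiate a new reset if none is in progress, or if the
         * timed out command is the one driving the current reset.
         */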
2611         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2612                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2613
2614         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2615         LEAVE;
2616 }
2617
2618 /**
2619  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2620  * @ipr_cmd:    ipr command struct
2621  *
2622  * This function blocks host requests and initiates an
2623  * adapter reset.
2624  *
2625  * Return value:
2626  *      none
2627  **/
2628 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2629 {
2630         unsigned long lock_flags = 0;
2631         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2632
2633         ENTER;
2634         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2635
2636         ioa_cfg->errors_logged++;
2637         dev_err(&ioa_cfg->pdev->dev,
2638                 "Adapter timed out transitioning to operational.\n");
2639
2640         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2641                 ioa_cfg->sdt_state = GET_DUMP;
2642
2643         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2644                 if (ipr_fastfail)
2645                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2646                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2647         }
2648
2649         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2650         LEAVE;
2651 }
2652
2653 /**
2654  * ipr_find_ses_entry - Find matching SES in SES table
2655  * @res:        resource entry struct of SES
2656  *
2657  * Return value:
2658  *      pointer to SES table entry / NULL on failure
2659  **/
2660 static const struct ipr_ses_table_entry *
2661 ipr_find_ses_entry(struct ipr_resource_entry *res)
2662 {
2663         int i, j, matches;
2664         struct ipr_std_inq_vpids *vpids;
2665         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2666
2667         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
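                     /* An 'X' in the compare string marks a product ID byte that
                      * must match; all other bytes are treated as wildcards. */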
2668                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2669                         if (ste->compare_product_id_byte[j] == 'X') {
2670                                 vpids = &res->std_inq_data.vpids;
2671                                 if (vpids->product_id[j] == ste->product_id[j])
2672                                         matches++;
2673                                 else
2674                                         break;
2675                         } else
2676                                 matches++;
2677                 }
2678
2679                 if (matches == IPR_PROD_ID_LEN)
2680                         return ste;
2681         }
2682
2683         return NULL;
2684 }
2685
2686 /**
2687  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2688  * @ioa_cfg:    ioa config struct
2689  * @bus:                SCSI bus
2690  * @bus_width:  bus width
2691  *
2692  * Return value:
2693  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2694  *      On a 2-byte wide bus the data rate in MB/sec is twice the bus
2695  *      clock in MHz (e.g. a wide enabled bus clocked at 160MHz can move
2696  *      up to 320MB/sec).
2697  **/
2698 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2699 {
2700         struct ipr_resource_entry *res;
2701         const struct ipr_ses_table_entry *ste;
2702         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2703
2704         /* Loop through each config table entry in the config table buffer */
2705         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2706                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2707                         continue;
2708
2709                 if (bus != res->bus)
2710                         continue;
2711
2712                 if (!(ste = ipr_find_ses_entry(res)))
2713                         continue;
2714
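                     /* The SES table limit is believed to be in MB/sec; scale it
                      * to 100KHz clock units, de-rating for bus width since a
                      * wide bus moves 2 bytes per transfer. */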
2715                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2716         }
2717
2718         return max_xfer_rate;
2719 }
2720
2721 /**
2722  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2723  * @ioa_cfg:            ioa config struct
2724  * @max_delay:          max delay in micro-seconds to wait
2725  *
2726  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2727  *
2728  * Return value:
2729  *      0 on success / other on failure
2730  **/
2731 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2732 {
2733         volatile u32 pcii_reg;
2734         int delay = 1;
2735
2736         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2737         while (delay < max_delay) {
2738                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2739
2740                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2741                         return 0;
2742
2743                 /* udelay cannot be used if delay is more than a few milliseconds */
2744                 if ((delay / 1000) > MAX_UDELAY_MS)
2745                         mdelay(delay / 1000);
2746                 else
2747                         udelay(delay);
2748
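                     /* Exponential backoff: the wait doubles on every pass */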
2749                 delay += delay;
2750         }
2751         return -EIO;
2752 }
2753
2754 /**
2755  * ipr_get_sis64_dump_data_section - Dump IOA memory
2756  * @ioa_cfg:                    ioa config struct
2757  * @start_addr:                 adapter address to dump
2758  * @dest:                       destination kernel buffer
2759  * @length_in_words:            length to dump in 4 byte words
2760  *
2761  * Return value:
2762  *      0 on success
2763  **/
2764 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2765                                            u32 start_addr,
2766                                            __be32 *dest, u32 length_in_words)
2767 {
2768         int i;
2769
2770         for (i = 0; i < length_in_words; i++) {
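                     /* Select the dump address, then read back one word of IOA data */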
2771                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2772                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2773                 dest++;
2774         }
2775
2776         return 0;
2777 }
2778
2779 /**
2780  * ipr_get_ldump_data_section - Dump IOA memory
2781  * @ioa_cfg:                    ioa config struct
2782  * @start_addr:                 adapter address to dump
2783  * @dest:                       destination kernel buffer
2784  * @length_in_words:            length to dump in 4 byte words
2785  *
2786  * Return value:
2787  *      0 on success / -EIO on failure
2788  **/
2789 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2790                                       u32 start_addr,
2791                                       __be32 *dest, u32 length_in_words)
2792 {
2793         volatile u32 temp_pcii_reg;
2794         int i, delay = 0;
2795
2796         if (ioa_cfg->sis64)
2797                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2798                                                        dest, length_in_words);
2799
2800         /* Write IOA interrupt reg starting LDUMP state  */
2801         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2802                ioa_cfg->regs.set_uproc_interrupt_reg32);
2803
2804         /* Wait for IO debug acknowledge */
2805         if (ipr_wait_iodbg_ack(ioa_cfg,
2806                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2807                 dev_err(&ioa_cfg->pdev->dev,
2808                         "IOA dump long data transfer timeout\n");
2809                 return -EIO;
2810         }
2811
2812         /* Signal LDUMP interlocked - clear IO debug ack */
2813         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2814                ioa_cfg->regs.clr_interrupt_reg);
2815
2816         /* Write Mailbox with starting address */
2817         writel(start_addr, ioa_cfg->ioa_mailbox);
2818
2819         /* Signal address valid - clear IOA Reset alert */
2820         writel(IPR_UPROCI_RESET_ALERT,
2821                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2822
2823         for (i = 0; i < length_in_words; i++) {
2824                 /* Wait for IO debug acknowledge */
2825                 if (ipr_wait_iodbg_ack(ioa_cfg,
2826                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2827                         dev_err(&ioa_cfg->pdev->dev,
2828                                 "IOA dump short data transfer timeout\n");
2829                         return -EIO;
2830                 }
2831
2832                 /* Read data from mailbox and increment destination pointer */
2833                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2834                 dest++;
2835
2836                 /* For all but the last word of data, signal data received */
2837                 if (i < (length_in_words - 1)) {
2838                         /* Signal dump data received - Clear IO debug Ack */
2839                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2840                                ioa_cfg->regs.clr_interrupt_reg);
2841                 }
2842         }
2843
2844         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2845         writel(IPR_UPROCI_RESET_ALERT,
2846                ioa_cfg->regs.set_uproc_interrupt_reg32);
2847
2848         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2849                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2850
2851         /* Signal dump data received - Clear IO debug Ack */
2852         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2853                ioa_cfg->regs.clr_interrupt_reg);
2854
2855         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2856         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2857                 temp_pcii_reg =
2858                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2859
2860                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2861                         return 0;
2862
2863                 udelay(10);
2864                 delay += 10;
2865         }
2866
2867         return 0;
2868 }
2869
2870 #ifdef CONFIG_SCSI_IPR_DUMP
2871 /**
2872  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2873  * @ioa_cfg:            ioa config struct
2874  * @pci_address:        adapter address
2875  * @length:             length of data to copy
2876  *
2877  * Copy data from PCI adapter to kernel buffer.
2878  * Note: length MUST be a 4 byte multiple
2879  * Return value:
2880  *      number of bytes copied
2881  **/
2882 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2883                         unsigned long pci_address, u32 length)
2884 {
2885         int bytes_copied = 0;
2886         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2887         __be32 *page;
2888         unsigned long lock_flags = 0;
2889         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2890
2891         if (ioa_cfg->sis64)
2892                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2893         else
2894                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2895
2896         while (bytes_copied < length &&
2897                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2898                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2899                     ioa_dump->page_offset == 0) {
2900                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2901
2902                         if (!page) {
2903                                 ipr_trace;
2904                                 return bytes_copied;
2905                         }
2906
2907                         ioa_dump->page_offset = 0;
2908                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2909                         ioa_dump->next_page_index++;
2910                 } else
2911                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2912
2913                 rem_len = length - bytes_copied;
2914                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2915                 cur_len = min(rem_len, rem_page_len);
2916
2917                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2918                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2919                         rc = -EIO;
2920                 } else {
2921                         rc = ipr_get_ldump_data_section(ioa_cfg,
2922                                                         pci_address + bytes_copied,
2923                                                         &page[ioa_dump->page_offset / 4],
2924                                                         (cur_len / sizeof(u32)));
2925                 }
2926                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2927
2928                 if (!rc) {
2929                         ioa_dump->page_offset += cur_len;
2930                         bytes_copied += cur_len;
2931                 } else {
2932                         ipr_trace;
2933                         break;
2934                 }
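                     /* Yield the CPU between chunks; a full IOA dump can take a while */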
2935                 schedule();
2936         }
2937
2938         return bytes_copied;
2939 }
2940
2941 /**
2942  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2943  * @hdr:        dump entry header struct
2944  *
2945  * Return value:
2946  *      nothing
2947  **/
2948 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2949 {
2950         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2951         hdr->num_elems = 1;
2952         hdr->offset = sizeof(*hdr);
2953         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2954 }
2955
2956 /**
2957  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2958  * @ioa_cfg:    ioa config struct
2959  * @driver_dump:        driver dump struct
2960  *
2961  * Return value:
2962  *      nothing
2963  **/
2964 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2965                                    struct ipr_driver_dump *driver_dump)
2966 {
2967         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2968
2969         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2970         driver_dump->ioa_type_entry.hdr.len =
2971                 sizeof(struct ipr_dump_ioa_type_entry) -
2972                 sizeof(struct ipr_dump_entry_header);
2973         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2974         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2975         driver_dump->ioa_type_entry.type = ioa_cfg->type;
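             /* Pack the version as major | card type | minor[0] | minor[1],
              * most significant byte first */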
2976         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2977                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2978                 ucode_vpd->minor_release[1];
2979         driver_dump->hdr.num_entries++;
2980 }
2981
2982 /**
2983  * ipr_dump_version_data - Fill in the driver version in the dump.
2984  * @ioa_cfg:    ioa config struct
2985  * @driver_dump:        driver dump struct
2986  *
2987  * Return value:
2988  *      nothing
2989  **/
2990 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2991                                   struct ipr_driver_dump *driver_dump)
2992 {
2993         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2994         driver_dump->version_entry.hdr.len =
2995                 sizeof(struct ipr_dump_version_entry) -
2996                 sizeof(struct ipr_dump_entry_header);
2997         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2998         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2999         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3000         driver_dump->hdr.num_entries++;
3001 }
3002
3003 /**
3004  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3005  * @ioa_cfg:    ioa config struct
3006  * @driver_dump:        driver dump struct
3007  *
3008  * Return value:
3009  *      nothing
3010  **/
3011 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3012                                    struct ipr_driver_dump *driver_dump)
3013 {
3014         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3015         driver_dump->trace_entry.hdr.len =
3016                 sizeof(struct ipr_dump_trace_entry) -
3017                 sizeof(struct ipr_dump_entry_header);
3018         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3019         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3020         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3021         driver_dump->hdr.num_entries++;
3022 }
3023
3024 /**
3025  * ipr_dump_location_data - Fill in the IOA location in the dump.
3026  * @ioa_cfg:    ioa config struct
3027  * @driver_dump:        driver dump struct
3028  *
3029  * Return value:
3030  *      nothing
3031  **/
3032 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3033                                    struct ipr_driver_dump *driver_dump)
3034 {
3035         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3036         driver_dump->location_entry.hdr.len =
3037                 sizeof(struct ipr_dump_location_entry) -
3038                 sizeof(struct ipr_dump_entry_header);
3039         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3040         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3041         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3042         driver_dump->hdr.num_entries++;
3043 }
3044
3045 /**
3046  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3047  * @ioa_cfg:    ioa config struct
3048  * @dump:               dump struct
3049  *
3050  * Return value:
3051  *      nothing
3052  **/
3053 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3054 {
3055         unsigned long start_addr, sdt_word;
3056         unsigned long lock_flags = 0;
3057         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3058         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3059         u32 num_entries, max_num_entries, start_off, end_off;
3060         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3061         struct ipr_sdt *sdt;
3062         int valid = 1;
3063         int i;
3064
3065         ENTER;
3066
3067         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068
3069         if (ioa_cfg->sdt_state != READ_DUMP) {
3070                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3071                 return;
3072         }
3073
3074         if (ioa_cfg->sis64) {
3075                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076                 ssleep(IPR_DUMP_DELAY_SECONDS);
3077                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3078         }
3079
3080         start_addr = readl(ioa_cfg->ioa_mailbox);
3081
3082         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3083                 dev_err(&ioa_cfg->pdev->dev,
3084                         "Invalid dump table format: %lx\n", start_addr);
3085                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3086                 return;
3087         }
3088
3089         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3090
3091         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3092
3093         /* Initialize the overall dump header */
3094         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3095         driver_dump->hdr.num_entries = 1;
3096         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3097         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3098         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3099         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3100
3101         ipr_dump_version_data(ioa_cfg, driver_dump);
3102         ipr_dump_location_data(ioa_cfg, driver_dump);
3103         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3104         ipr_dump_trace_data(ioa_cfg, driver_dump);
3105
3106         /* Update dump_header */
3107         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3108
3109         /* IOA Dump entry */
3110         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3111         ioa_dump->hdr.len = 0;
3112         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3113         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3114
3115         /* First entries in sdt are actually a list of dump addresses and
3116          * lengths to gather the real dump data.  sdt represents the pointer
3117          * to the ioa generated dump table.  Dump data will be extracted based
3118          * on entries in this table */
3119         sdt = &ioa_dump->sdt;
3120
3121         if (ioa_cfg->sis64) {
3122                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3123                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3124         } else {
3125                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3126                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3127         }
3128
3129         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3130                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3131         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3132                                         bytes_to_copy / sizeof(__be32));
3133
3134         /* Smart Dump table is ready to use and the first entry is valid */
3135         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3136             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3137                 dev_err(&ioa_cfg->pdev->dev,
3138                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3139                         rc, be32_to_cpu(sdt->hdr.state));
3140                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3141                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3142                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143                 return;
3144         }
3145
3146         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3147
3148         if (num_entries > max_num_entries)
3149                 num_entries = max_num_entries;
3150
3151         /* Update dump length to the actual data to be copied */
3152         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3153         if (ioa_cfg->sis64)
3154                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3155         else
3156                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3157
3158         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3159
3160         for (i = 0; i < num_entries; i++) {
3161                 if (ioa_dump->hdr.len > max_dump_size) {
3162                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3163                         break;
3164                 }
3165
3166                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3167                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3168                         if (ioa_cfg->sis64)
3169                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3170                         else {
3171                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3172                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3173
3174                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3175                                         bytes_to_copy = end_off - start_off;
3176                                 else
3177                                         valid = 0;
3178                         }
3179                         if (valid) {
3180                                 if (bytes_to_copy > max_dump_size) {
3181                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3182                                         continue;
3183                                 }
3184
3185                                 /* Copy data from adapter to driver buffers */
3186                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3187                                                             bytes_to_copy);
3188
3189                                 ioa_dump->hdr.len += bytes_copied;
3190
3191                                 if (bytes_copied != bytes_to_copy) {
3192                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3193                                         break;
3194                                 }
3195                         }
3196                 }
3197         }
3198
3199         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3200
3201         /* Update dump_header */
3202         driver_dump->hdr.len += ioa_dump->hdr.len;
3203         wmb();
3204         ioa_cfg->sdt_state = DUMP_OBTAINED;
3205         LEAVE;
3206 }
3207
3208 #else
3209 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3210 #endif
3211
3212 /**
3213  * ipr_release_dump - Free adapter dump memory
3214  * @kref:       kref struct
3215  *
3216  * Return value:
3217  *      nothing
3218  **/
3219 static void ipr_release_dump(struct kref *kref)
3220 {
3221         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3222         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3223         unsigned long lock_flags = 0;
3224         int i;
3225
3226         ENTER;
3227         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3228         ioa_cfg->dump = NULL;
3229         ioa_cfg->sdt_state = INACTIVE;
3230         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231
3232         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3233                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3234
3235         vfree(dump->ioa_dump.ioa_data);
3236         kfree(dump);
3237         LEAVE;
3238 }
3239
3240 /**
3241  * ipr_worker_thread - Worker thread
3242  * @work:               ioa config struct
3243  *
3244  * Called at task level from a work thread. This function takes care
3245  * of adding and removing device from the mid-layer as configuration
3246  * changes are detected by the adapter.
3247  *
3248  * Return value:
3249  *      nothing
3250  **/
3251 static void ipr_worker_thread(struct work_struct *work)
3252 {
3253         unsigned long lock_flags;
3254         struct ipr_resource_entry *res;
3255         struct scsi_device *sdev;
3256         struct ipr_dump *dump;
3257         struct ipr_ioa_cfg *ioa_cfg =
3258                 container_of(work, struct ipr_ioa_cfg, work_q);
3259         u8 bus, target, lun;
3260         int did_work;
3261
3262         ENTER;
3263         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3264
3265         if (ioa_cfg->sdt_state == READ_DUMP) {
3266                 dump = ioa_cfg->dump;
3267                 if (!dump) {
3268                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3269                         return;
3270                 }
3271                 kref_get(&dump->kref);
3272                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3273                 ipr_get_ioa_dump(ioa_cfg, dump);
3274                 kref_put(&dump->kref, ipr_release_dump);
3275
3276                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3277                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3278                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3279                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3280                 return;
3281         }
3282
3283 restart:
3284         do {
3285                 did_work = 0;
3286                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3287                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3288                         return;
3289                 }
3290
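                     /* Remove devices reported gone; the host lock is dropped
                      * around scsi_remove_device(), which can sleep */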
3291                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3292                         if (res->del_from_ml && res->sdev) {
3293                                 did_work = 1;
3294                                 sdev = res->sdev;
3295                                 if (!scsi_device_get(sdev)) {
3296                                         if (!res->add_to_ml)
3297                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3298                                         else
3299                                                 res->del_from_ml = 0;
3300                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3301                                         scsi_remove_device(sdev);
3302                                         scsi_device_put(sdev);
3303                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3304                                 }
3305                                 break;
3306                         }
3307                 }
3308         } while (did_work);
3309
3310         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3311                 if (res->add_to_ml) {
3312                         bus = res->bus;
3313                         target = res->target;
3314                         lun = res->lun;
3315                         res->add_to_ml = 0;
3316                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3317                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3318                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3319                         goto restart;
3320                 }
3321         }
3322
3323         ioa_cfg->scan_done = 1;
3324         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3325         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3326         LEAVE;
3327 }
3328
3329 #ifdef CONFIG_SCSI_IPR_TRACE
3330 /**
3331  * ipr_read_trace - Dump the adapter trace
3332  * @filp:               open sysfs file
3333  * @kobj:               kobject struct
3334  * @bin_attr:           bin_attribute struct
3335  * @buf:                buffer
3336  * @off:                offset
3337  * @count:              buffer size
3338  *
3339  * Return value:
3340  *      number of bytes printed to buffer
3341  **/
3342 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3343                               struct bin_attribute *bin_attr,
3344                               char *buf, loff_t off, size_t count)
3345 {
3346         struct device *dev = container_of(kobj, struct device, kobj);
3347         struct Scsi_Host *shost = class_to_shost(dev);
3348         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3349         unsigned long lock_flags = 0;
3350         ssize_t ret;
3351
3352         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3353         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3354                                 IPR_TRACE_SIZE);
3355         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356
3357         return ret;
3358 }
3359
3360 static struct bin_attribute ipr_trace_attr = {
3361         .attr = {
3362                 .name = "trace",
3363                 .mode = S_IRUGO,
3364         },
3365         .size = 0,
3366         .read = ipr_read_trace,
3367 };
3368 #endif
3369
3370 /**
3371  * ipr_show_fw_version - Show the firmware version
3372  * @dev:        class device struct
3373  * @buf:        buffer
3374  *
3375  * Return value:
3376  *      number of bytes printed to buffer
3377  **/
3378 static ssize_t ipr_show_fw_version(struct device *dev,
3379                                    struct device_attribute *attr, char *buf)
3380 {
3381         struct Scsi_Host *shost = class_to_shost(dev);
3382         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3383         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3384         unsigned long lock_flags = 0;
3385         int len;
3386
3387         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3388         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3389                        ucode_vpd->major_release, ucode_vpd->card_type,
3390                        ucode_vpd->minor_release[0],
3391                        ucode_vpd->minor_release[1]);
3392         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3393         return len;
3394 }
3395
3396 static struct device_attribute ipr_fw_version_attr = {
3397         .attr = {
3398                 .name =         "fw_version",
3399                 .mode =         S_IRUGO,
3400         },
3401         .show = ipr_show_fw_version,
3402 };
3403
3404 /**
3405  * ipr_show_log_level - Show the adapter's error logging level
3406  * @dev:        class device struct
3407  * @buf:        buffer
3408  *
3409  * Return value:
3410  *      number of bytes printed to buffer
3411  **/
3412 static ssize_t ipr_show_log_level(struct device *dev,
3413                                    struct device_attribute *attr, char *buf)
3414 {
3415         struct Scsi_Host *shost = class_to_shost(dev);
3416         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3417         unsigned long lock_flags = 0;
3418         int len;
3419
3420         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3421         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3422         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3423         return len;
3424 }
3425
3426 /**
3427  * ipr_store_log_level - Change the adapter's error logging level
3428  * @dev:        class device struct
3429  * @buf:        buffer
3430  *
3431  * Return value:
3432  *      number of bytes consumed from buffer
3433  **/
3434 static ssize_t ipr_store_log_level(struct device *dev,
3435                                    struct device_attribute *attr,
3436                                    const char *buf, size_t count)
3437 {
3438         struct Scsi_Host *shost = class_to_shost(dev);
3439         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3440         unsigned long lock_flags = 0;
3441
3442         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3443         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3444         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3445         return strlen(buf);
3446 }
3447
3448 static struct device_attribute ipr_log_level_attr = {
3449         .attr = {
3450                 .name =         "log_level",
3451                 .mode =         S_IRUGO | S_IWUSR,
3452         },
3453         .show = ipr_show_log_level,
3454         .store = ipr_store_log_level
3455 };
3456
3457 /**
3458  * ipr_store_diagnostics - IOA Diagnostics interface
3459  * @dev:        device struct
3460  * @buf:        buffer
3461  * @count:      buffer size
3462  *
3463  * This function will reset the adapter and wait a reasonable
3464  * amount of time for any errors that the adapter might log.
3465  *
3466  * Return value:
3467  *      count on success / other on failure
3468  **/
3469 static ssize_t ipr_store_diagnostics(struct device *dev,
3470                                      struct device_attribute *attr,
3471                                      const char *buf, size_t count)
3472 {
3473         struct Scsi_Host *shost = class_to_shost(dev);
3474         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3475         unsigned long lock_flags = 0;
3476         int rc = count;
3477
3478         if (!capable(CAP_SYS_ADMIN))
3479                 return -EACCES;
3480
3481         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3482         while (ioa_cfg->in_reset_reload) {
3483                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3484                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3485                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3486         }
3487
3488         ioa_cfg->errors_logged = 0;
3489         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3490
3491         if (ioa_cfg->in_reset_reload) {
3492                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3493                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3494
3495                 /* Wait for a second for any errors to be logged */
3496                 msleep(1000);
3497         } else {
3498                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3499                 return -EIO;
3500         }
3501
3502         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3503         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3504                 rc = -EIO;
3505         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3506
3507         return rc;
3508 }
3509
3510 static struct device_attribute ipr_diagnostics_attr = {
3511         .attr = {
3512                 .name =         "run_diagnostics",
3513                 .mode =         S_IWUSR,
3514         },
3515         .store = ipr_store_diagnostics
3516 };
3517
3518 /**
3519  * ipr_show_adapter_state - Show the adapter's state
3520  * @dev:        device struct
3521  * @buf:        buffer
3522  *
3523  * Return value:
3524  *      number of bytes printed to buffer
3525  **/
3526 static ssize_t ipr_show_adapter_state(struct device *dev,
3527                                       struct device_attribute *attr, char *buf)
3528 {
3529         struct Scsi_Host *shost = class_to_shost(dev);
3530         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3531         unsigned long lock_flags = 0;
3532         int len;
3533
3534         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3535         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3536                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3537         else
3538                 len = snprintf(buf, PAGE_SIZE, "online\n");
3539         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3540         return len;
3541 }
3542
3543 /**
3544  * ipr_store_adapter_state - Change adapter state
3545  * @dev:        device struct
3546  * @buf:        buffer
3547  * @count:      buffer size
3548  *
3549  * This function will change the adapter's state.
3550  *
3551  * Return value:
3552  *      count on success / other on failure
3553  **/
3554 static ssize_t ipr_store_adapter_state(struct device *dev,
3555                                        struct device_attribute *attr,
3556                                        const char *buf, size_t count)
3557 {
3558         struct Scsi_Host *shost = class_to_shost(dev);
3559         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3560         unsigned long lock_flags;
3561         int result = count, i;
3562
3563         if (!capable(CAP_SYS_ADMIN))
3564                 return -EACCES;
3565
3566         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3567         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3568             !strncmp(buf, "online", 6)) {
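                     /* Mark every HRR queue alive again before restarting the IOA */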
3569                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3570                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3571                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3572                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3573                 }
3574                 wmb();
3575                 ioa_cfg->reset_retries = 0;
3576                 ioa_cfg->in_ioa_bringdown = 0;
3577                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3578         }
3579         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3580         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3581
3582         return result;
3583 }
3584
3585 static struct device_attribute ipr_ioa_state_attr = {
3586         .attr = {
3587                 .name =         "online_state",
3588                 .mode =         S_IRUGO | S_IWUSR,
3589         },
3590         .show = ipr_show_adapter_state,
3591         .store = ipr_store_adapter_state
3592 };
3593
3594 /**
3595  * ipr_store_reset_adapter - Reset the adapter
3596  * @dev:        device struct
3597  * @buf:        buffer
3598  * @count:      buffer size
3599  *
3600  * This function will reset the adapter.
3601  *
3602  * Return value:
3603  *      count on success / other on failure
3604  **/
3605 static ssize_t ipr_store_reset_adapter(struct device *dev,
3606                                        struct device_attribute *attr,
3607                                        const char *buf, size_t count)
3608 {
3609         struct Scsi_Host *shost = class_to_shost(dev);
3610         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3611         unsigned long lock_flags;
3612         int result = count;
3613
3614         if (!capable(CAP_SYS_ADMIN))
3615                 return -EACCES;
3616
3617         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3618         if (!ioa_cfg->in_reset_reload)
3619                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3620         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3622
3623         return result;
3624 }
3625
3626 static struct device_attribute ipr_ioa_reset_attr = {
3627         .attr = {
3628                 .name =         "reset_host",
3629                 .mode =         S_IWUSR,
3630         },
3631         .store = ipr_store_reset_adapter
3632 };
3633
3634 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3635 /**
3636  * ipr_show_iopoll_weight - Show ipr polling mode
3637  * @dev:        class device struct
3638  * @buf:        buffer
3639  *
3640  * Return value:
3641  *      number of bytes printed to buffer
3642  **/
3643 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3644                                    struct device_attribute *attr, char *buf)
3645 {
3646         struct Scsi_Host *shost = class_to_shost(dev);
3647         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3648         unsigned long lock_flags = 0;
3649         int len;
3650
3651         spin_lock_irqsave(shost->host_lock, lock_flags);
3652         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3653         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3654
3655         return len;
3656 }
3657
3658 /**
3659  * ipr_store_iopoll_weight - Change the adapter's polling mode
3660  * @dev:        class device struct
3661  * @buf:        buffer
3662  *
3663  * Return value:
3664  *      number of bytes consumed from buffer
3665  **/
3666 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3667                                         struct device_attribute *attr,
3668                                         const char *buf, size_t count)
3669 {
3670         struct Scsi_Host *shost = class_to_shost(dev);
3671         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3672         unsigned long user_iopoll_weight;
3673         unsigned long lock_flags = 0;
3674         int i;
3675
3676         if (!ioa_cfg->sis64) {
3677                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3678                 return -EINVAL;
3679         }
3680         if (kstrtoul(buf, 10, &user_iopoll_weight))
3681                 return -EINVAL;
3682
3683         if (user_iopoll_weight > 256) {
3684                 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be 256 or less\n");
3685                 return -EINVAL;
3686         }
3687
3688         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3689                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight is already set to that value\n");
3690                 return strlen(buf);
3691         }
3692
3693         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3694                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3695                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3696         }
3697
3698         spin_lock_irqsave(shost->host_lock, lock_flags);
3699         ioa_cfg->iopoll_weight = user_iopoll_weight;
3700         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3701                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3702                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3703                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3704                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3705                 }
3706         }
3707         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3708
3709         return strlen(buf);
3710 }
3711
3712 static struct device_attribute ipr_iopoll_weight_attr = {
3713         .attr = {
3714                 .name =         "iopoll_weight",
3715                 .mode =         S_IRUGO | S_IWUSR,
3716         },
3717         .show = ipr_show_iopoll_weight,
3718         .store = ipr_store_iopoll_weight
3719 };
3720
3721 /**
3722  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3723  * @buf_len:            buffer length
3724  *
3725  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3726  * list to use for microcode download
3727  *
3728  * Return value:
3729  *      pointer to sglist / NULL on failure
3730  **/
3731 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3732 {
3733         int sg_size, order, bsize_elem, num_elem, i, j;
3734         struct ipr_sglist *sglist;
3735         struct scatterlist *scatterlist;
3736         struct page *page;
3737
3738         /* Get the minimum size per scatter/gather element */
3739         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
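             /* e.g., assuming IPR_MAX_SGLIST is 64, a 1MB image works out to
              * roughly 16KB per element, which get_order() below rounds up to
              * a power-of-two page allocation */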
3740
3741         /* Get the actual size per element */
3742         order = get_order(sg_size);
3743
3744         /* Determine the actual number of bytes per element */
3745         bsize_elem = PAGE_SIZE * (1 << order);
3746
3747         /* Determine the actual number of sg entries needed */
3748         if (buf_len % bsize_elem)
3749                 num_elem = (buf_len / bsize_elem) + 1;
3750         else
3751                 num_elem = buf_len / bsize_elem;
3752
3753         /* Allocate a scatter/gather list for the DMA */
3754         sglist = kzalloc(sizeof(struct ipr_sglist) +
3755                          (sizeof(struct scatterlist) * (num_elem - 1)),
3756                          GFP_KERNEL);
3757
3758         if (sglist == NULL) {
3759                 ipr_trace;
3760                 return NULL;
3761         }
3762
3763         scatterlist = sglist->scatterlist;
3764         sg_init_table(scatterlist, num_elem);
3765
3766         sglist->order = order;
3767         sglist->num_sg = num_elem;
3768
3769         /* Allocate a bunch of sg elements */
3770         for (i = 0; i < num_elem; i++) {
3771                 page = alloc_pages(GFP_KERNEL, order);
3772                 if (!page) {
3773                         ipr_trace;
3774
3775                         /* Free up what we already allocated */
3776                         for (j = i - 1; j >= 0; j--)
3777                                 __free_pages(sg_page(&scatterlist[j]), order);
3778                         kfree(sglist);
3779                         return NULL;
3780                 }
3781
3782                 sg_set_page(&scatterlist[i], page, 0, 0);
3783         }
3784
3785         return sglist;
3786 }
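
     /*
      * Typical usage, mirroring ipr_store_update_fw() below (sketch only):
      *
      *      sglist = ipr_alloc_ucode_buffer(dnld_size);
      *      if (sglist && !ipr_copy_ucode_buffer(sglist, src, dnld_size))
      *              ipr_update_ioa_ucode(ioa_cfg, sglist);
      *      if (sglist)
      *              ipr_free_ucode_buffer(sglist);
      */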
3787
3788 /**
3789  * ipr_free_ucode_buffer - Frees a microcode download buffer
3790  * @sglist:             scatter/gather list pointer
3791  *
3792  * Free a DMA'able ucode download buffer previously allocated with
3793  * ipr_alloc_ucode_buffer
3794  *
3795  * Return value:
3796  *      nothing
3797  **/
3798 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3799 {
3800         int i;
3801
3802         for (i = 0; i < sglist->num_sg; i++)
3803                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3804
3805         kfree(sglist);
3806 }
3807
3808 /**
3809  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3810  * @sglist:             scatter/gather list pointer
3811  * @buffer:             buffer pointer
3812  * @len:                buffer length
3813  *
3814  * Copy a microcode image from a user buffer into a buffer allocated by
3815  * ipr_alloc_ucode_buffer
3816  *
3817  * Return value:
3818  *      0 on success / other on failure
3819  **/
3820 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3821                                  u8 *buffer, u32 len)
3822 {
3823         int bsize_elem, i, result = 0;
3824         struct scatterlist *scatterlist;
3825         void *kaddr;
3826
3827         /* Determine the actual number of bytes per element */
3828         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3829
3830         scatterlist = sglist->scatterlist;
3831
3832         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3833                 struct page *page = sg_page(&scatterlist[i]);
3834
3835                 kaddr = kmap(page);
3836                 memcpy(kaddr, buffer, bsize_elem);
3837                 kunmap(page);
3838
3839                 scatterlist[i].length = bsize_elem;
3845         }
3846
3847         if (len % bsize_elem) {
3848                 struct page *page = sg_page(&scatterlist[i]);
3849
3850                 kaddr = kmap(page);
3851                 memcpy(kaddr, buffer, len % bsize_elem);
3852                 kunmap(page);
3853
3854                 scatterlist[i].length = len % bsize_elem;
3855         }
3856
3857         sglist->buffer_len = len;
3858         return result;
3859 }
3860
3861 /**
3862  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3863  * @ipr_cmd:            ipr command struct
3864  * @sglist:             scatter/gather list
3865  *
3866  * Builds a microcode download IOA data list (IOADL).
3867  *
3868  **/
3869 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3870                                     struct ipr_sglist *sglist)
3871 {
3872         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3873         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3874         struct scatterlist *scatterlist = sglist->scatterlist;
3875         int i;
3876
3877         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3878         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3879         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3880
3881         ioarcb->ioadl_len =
3882                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3883         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3884                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3885                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3886                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3887         }
3888
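             /* Flag the final descriptor so the IOA knows where the list ends */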
3889         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3890 }
3891
3892 /**
3893  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3894  * @ipr_cmd:    ipr command struct
3895  * @sglist:             scatter/gather list
3896  *
3897  * Builds a microcode download IOA data list (IOADL).
3898  *
3899  **/
3900 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3901                                   struct ipr_sglist *sglist)
3902 {
3903         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3904         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3905         struct scatterlist *scatterlist = sglist->scatterlist;
3906         int i;
3907
3908         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3909         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3910         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3911
3912         ioarcb->ioadl_len =
3913                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3914
3915         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3916                 ioadl[i].flags_and_data_len =
3917                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3918                 ioadl[i].address =
3919                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3920         }
3921
3922         ioadl[i-1].flags_and_data_len |=
3923                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3924 }
3925
3926 /**
3927  * ipr_update_ioa_ucode - Update IOA's microcode
3928  * @ioa_cfg:    ioa config struct
3929  * @sglist:             scatter/gather list
3930  *
3931  * Initiate an adapter reset to update the IOA's microcode
3932  *
3933  * Return value:
3934  *      0 on success / -EIO on failure
3935  **/
3936 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3937                                 struct ipr_sglist *sglist)
3938 {
3939         unsigned long lock_flags;
3940
3941         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
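             /* Wait out any reset already in progress before starting the download */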
3942         while (ioa_cfg->in_reset_reload) {
3943                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3944                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3945                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3946         }
3947
3948         if (ioa_cfg->ucode_sglist) {
3949                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950                 dev_err(&ioa_cfg->pdev->dev,
3951                         "Microcode download already in progress\n");
3952                 return -EIO;
3953         }
3954
3955         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3956                                         sglist->scatterlist, sglist->num_sg,
3957                                         DMA_TO_DEVICE);
3958
3959         if (!sglist->num_dma_sg) {
3960                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3961                 dev_err(&ioa_cfg->pdev->dev,
3962                         "Failed to map microcode download buffer!\n");
3963                 return -EIO;
3964         }
3965
3966         ioa_cfg->ucode_sglist = sglist;
3967         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3968         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3969         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3970
3971         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3972         ioa_cfg->ucode_sglist = NULL;
3973         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3974         return 0;
3975 }
3976
3977 /**
3978  * ipr_store_update_fw - Update the firmware on the adapter
3979  * @dev:        device struct
3980  * @buf:        buffer
3981  * @count:      buffer size
3982  *
3983  * This function will update the firmware on the adapter.
3984  *
3985  * Return value:
3986  *      count on success / other on failure
3987  **/
3988 static ssize_t ipr_store_update_fw(struct device *dev,
3989                                    struct device_attribute *attr,
3990                                    const char *buf, size_t count)
3991 {
3992         struct Scsi_Host *shost = class_to_shost(dev);
3993         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3994         struct ipr_ucode_image_header *image_hdr;
3995         const struct firmware *fw_entry;
3996         struct ipr_sglist *sglist;
3997         char fname[100];
3998         char *src;
3999         int len, result, dnld_size;
4000
4001         if (!capable(CAP_SYS_ADMIN))
4002                 return -EACCES;
4003
4004         snprintf(fname, sizeof(fname), "%s", buf);
4005         len = strlen(fname);
             /* Strip the trailing newline, if any, from the sysfs write */
             if (len && fname[len - 1] == '\n')
                     fname[len - 1] = '\0';
4006
4007         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4008                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4009                 return -EIO;
4010         }
4011
4012         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4013
4014         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4015         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4016         sglist = ipr_alloc_ucode_buffer(dnld_size);
4017
4018         if (!sglist) {
4019                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4020                 release_firmware(fw_entry);
4021                 return -ENOMEM;
4022         }
4023
4024         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4025
4026         if (result) {
4027                 dev_err(&ioa_cfg->pdev->dev,
4028                         "Microcode buffer copy to DMA buffer failed\n");
4029                 goto out;
4030         }
4031
4032         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4033
4034         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4035
4036         if (!result)
4037                 result = count;
4038 out:
4039         ipr_free_ucode_buffer(sglist);
4040         release_firmware(fw_entry);
4041         return result;
4042 }
4043
4044 static struct device_attribute ipr_update_fw_attr = {
4045         .attr = {
4046                 .name =         "update_fw",
4047                 .mode =         S_IWUSR,
4048         },
4049         .store = ipr_store_update_fw
4050 };
4051
4052 /**
4053  * ipr_show_fw_type - Show the adapter's firmware type.
4054  * @dev:        class device struct
4055  * @buf:        buffer
4056  *
4057  * Return value:
4058  *      number of bytes printed to buffer
4059  **/
4060 static ssize_t ipr_show_fw_type(struct device *dev,
4061                                 struct device_attribute *attr, char *buf)
4062 {
4063         struct Scsi_Host *shost = class_to_shost(dev);
4064         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4065         unsigned long lock_flags = 0;
4066         int len;
4067
4068         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4069         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4070         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4071         return len;
4072 }
4073
4074 static struct device_attribute ipr_ioa_fw_type_attr = {
4075         .attr = {
4076                 .name =         "fw_type",
4077                 .mode =         S_IRUGO,
4078         },
4079         .show = ipr_show_fw_type
4080 };
4081
4082 static struct device_attribute *ipr_ioa_attrs[] = {
4083         &ipr_fw_version_attr,
4084         &ipr_log_level_attr,
4085         &ipr_diagnostics_attr,
4086         &ipr_ioa_state_attr,
4087         &ipr_ioa_reset_attr,
4088         &ipr_update_fw_attr,
4089         &ipr_ioa_fw_type_attr,
4090         &ipr_iopoll_weight_attr,
4091         NULL,
4092 };
4093
4094 #ifdef CONFIG_SCSI_IPR_DUMP
4095 /**
4096  * ipr_read_dump - Dump the adapter
4097  * @filp:               open sysfs file
4098  * @kobj:               kobject struct
4099  * @bin_attr:           bin_attribute struct
4100  * @buf:                buffer
4101  * @off:                offset
4102  * @count:              buffer size
4103  *
4104  * Return value:
4105  *      number of bytes printed to buffer
4106  **/
4107 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4108                              struct bin_attribute *bin_attr,
4109                              char *buf, loff_t off, size_t count)
4110 {
4111         struct device *cdev = container_of(kobj, struct device, kobj);
4112         struct Scsi_Host *shost = class_to_shost(cdev);
4113         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4114         struct ipr_dump *dump;
4115         unsigned long lock_flags = 0;
4116         char *src;
4117         int len, sdt_end;
4118         size_t rc = count;
4119
4120         if (!capable(CAP_SYS_ADMIN))
4121                 return -EACCES;
4122
4123         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4124         dump = ioa_cfg->dump;
4125
4126         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4127                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4128                 return 0;
4129         }
4130         kref_get(&dump->kref);
4131         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4132
4133         if (off > dump->driver_dump.hdr.len) {
4134                 kref_put(&dump->kref, ipr_release_dump);
4135                 return 0;
4136         }
4137
4138         if (off + count > dump->driver_dump.hdr.len) {
4139                 count = dump->driver_dump.hdr.len - off;
4140                 rc = count;
4141         }
4142
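        /* The dump image is laid out as three consecutive regions: the
         * driver dump header, the SDT (whose size depends on the SIS
         * format), and the captured IOA pages. Each block below copies
         * the overlap of [off, off + count) with one region, then
         * rebases off so the next region starts at offset zero.
         */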
4143         if (count && off < sizeof(dump->driver_dump)) {
4144                 if (off + count > sizeof(dump->driver_dump))
4145                         len = sizeof(dump->driver_dump) - off;
4146                 else
4147                         len = count;
4148                 src = (u8 *)&dump->driver_dump + off;
4149                 memcpy(buf, src, len);
4150                 buf += len;
4151                 off += len;
4152                 count -= len;
4153         }
4154
4155         off -= sizeof(dump->driver_dump);
4156
4157         if (ioa_cfg->sis64)
4158                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4159                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4160                            sizeof(struct ipr_sdt_entry));
4161         else
4162                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4163                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4164
4165         if (count && off < sdt_end) {
4166                 if (off + count > sdt_end)
4167                         len = sdt_end - off;
4168                 else
4169                         len = count;
4170                 src = (u8 *)&dump->ioa_dump + off;
4171                 memcpy(buf, src, len);
4172                 buf += len;
4173                 off += len;
4174                 count -= len;
4175         }
4176
4177         off -= sdt_end;
4178
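        /* The remaining data lives in the page-sized IOA dump buffers;
         * copy it out in chunks that never cross a page boundary.
         */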
4179         while (count) {
4180                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4181                         len = PAGE_ALIGN(off) - off;
4182                 else
4183                         len = count;
4184                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4185                 src += off & ~PAGE_MASK;
4186                 memcpy(buf, src, len);
4187                 buf += len;
4188                 off += len;
4189                 count -= len;
4190         }
4191
4192         kref_put(&dump->kref, ipr_release_dump);
4193         return rc;
4194 }
4195
4196 /**
4197  * ipr_alloc_dump - Prepare for adapter dump
4198  * @ioa_cfg:    ioa config struct
4199  *
4200  * Return value:
4201  *      0 on success / other on failure
4202  **/
4203 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4204 {
4205         struct ipr_dump *dump;
4206         __be32 **ioa_data;
4207         unsigned long lock_flags = 0;
4208
4209         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4210
4211         if (!dump) {
4212                 ipr_err("Dump memory allocation failed\n");
4213                 return -ENOMEM;
4214         }
4215
4216         if (ioa_cfg->sis64)
4217                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4218         else
4219                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4220
4221         if (!ioa_data) {
4222                 ipr_err("Dump memory allocation failed\n");
4223                 kfree(dump);
4224                 return -ENOMEM;
4225         }
4226
4227         dump->ioa_dump.ioa_data = ioa_data;
4228
4229         kref_init(&dump->kref);
4230         dump->ioa_cfg = ioa_cfg;
4231
4232         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4233
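        /* If dump state is no longer inactive, another dump is already
         * pending or in progress; discard the buffers just allocated
         * and leave the existing state alone.
         */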
4234         if (INACTIVE != ioa_cfg->sdt_state) {
4235                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4236                 vfree(dump->ioa_dump.ioa_data);
4237                 kfree(dump);
4238                 return 0;
4239         }
4240
4241         ioa_cfg->dump = dump;
4242         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4243         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4244                 ioa_cfg->dump_taken = 1;
4245                 schedule_work(&ioa_cfg->work_q);
4246         }
4247         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4248
4249         return 0;
4250 }
4251
4252 /**
4253  * ipr_free_dump - Free adapter dump memory
4254  * @ioa_cfg:    ioa config struct
4255  *
4256  * Return value:
4257  *      0 on success / other on failure
4258  **/
4259 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4260 {
4261         struct ipr_dump *dump;
4262         unsigned long lock_flags = 0;
4263
4264         ENTER;
4265
4266         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4267         dump = ioa_cfg->dump;
4268         if (!dump) {
4269                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4270                 return 0;
4271         }
4272
4273         ioa_cfg->dump = NULL;
4274         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4275
4276         kref_put(&dump->kref, ipr_release_dump);
4277
4278         LEAVE;
4279         return 0;
4280 }
4281
4282 /**
4283  * ipr_write_dump - Setup dump state of adapter
4284  * @filp:               open sysfs file
4285  * @kobj:               kobject struct
4286  * @bin_attr:           bin_attribute struct
4287  * @buf:                buffer
4288  * @off:                offset
4289  * @count:              buffer size
4290  *
4291  * Return value:
 *      count on success / other on failure
4293  **/
4294 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4295                               struct bin_attribute *bin_attr,
4296                               char *buf, loff_t off, size_t count)
4297 {
4298         struct device *cdev = container_of(kobj, struct device, kobj);
4299         struct Scsi_Host *shost = class_to_shost(cdev);
4300         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4301         int rc;
4302
4303         if (!capable(CAP_SYS_ADMIN))
4304                 return -EACCES;
4305
4306         if (buf[0] == '1')
4307                 rc = ipr_alloc_dump(ioa_cfg);
4308         else if (buf[0] == '0')
4309                 rc = ipr_free_dump(ioa_cfg);
4310         else
4311                 return -EINVAL;
4312
4313         if (rc)
4314                 return rc;
4315         else
4316                 return count;
4317 }
4318
4319 static struct bin_attribute ipr_dump_attr = {
4320         .attr = {
4321                 .name = "dump",
4322                 .mode = S_IRUSR | S_IWUSR,
4323         },
4324         .size = 0,
4325         .read = ipr_read_dump,
4326         .write = ipr_write_dump
4327 };
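/*
 * Sketch of driving the dump attribute from user space (the sysfs path
 * is illustrative; the attribute hangs off the scsi_host device):
 *
 *      echo 1 > dump           - allocate buffers and arm dump collection
 *      cat dump > dump.bin     - read driver header, SDT, then IOA pages
 *      echo 0 > dump           - release the dump memory
 */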
4328 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4330 #endif
4331
4332 /**
4333  * ipr_change_queue_depth - Change the device's queue depth
4334  * @sdev:       scsi device struct
4335  * @qdepth:     depth to set
4337  *
4338  * Return value:
4339  *      actual depth set
4340  **/
4341 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4342 {
4343         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4344         struct ipr_resource_entry *res;
4345         unsigned long lock_flags = 0;
4346
4347         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4348         res = (struct ipr_resource_entry *)sdev->hostdata;
4349
4350         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4351                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4352         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4353
4354         scsi_change_queue_depth(sdev, qdepth);
4355         return sdev->queue_depth;
4356 }
4357
4358 /**
4359  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4360  * @dev:        device struct
4361  * @attr:       device attribute structure
4362  * @buf:        buffer
4363  *
4364  * Return value:
4365  *      number of bytes printed to buffer
4366  **/
4367 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4368 {
4369         struct scsi_device *sdev = to_scsi_device(dev);
4370         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4371         struct ipr_resource_entry *res;
4372         unsigned long lock_flags = 0;
4373         ssize_t len = -ENXIO;
4374
4375         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4376         res = (struct ipr_resource_entry *)sdev->hostdata;
4377         if (res)
4378                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4379         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4380         return len;
4381 }
4382
4383 static struct device_attribute ipr_adapter_handle_attr = {
4384         .attr = {
4385                 .name =         "adapter_handle",
4386                 .mode =         S_IRUSR,
4387         },
4388         .show = ipr_show_adapter_handle
4389 };
4390
4391 /**
4392  * ipr_show_resource_path - Show the resource path or the resource address for
4393  *                          this device.
4394  * @dev:        device struct
4395  * @attr:       device attribute structure
4396  * @buf:        buffer
4397  *
4398  * Return value:
4399  *      number of bytes printed to buffer
4400  **/
4401 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4402 {
4403         struct scsi_device *sdev = to_scsi_device(dev);
4404         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4405         struct ipr_resource_entry *res;
4406         unsigned long lock_flags = 0;
4407         ssize_t len = -ENXIO;
4408         char buffer[IPR_MAX_RES_PATH_LENGTH];
4409
4410         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4411         res = (struct ipr_resource_entry *)sdev->hostdata;
4412         if (res && ioa_cfg->sis64)
4413                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4414                                __ipr_format_res_path(res->res_path, buffer,
4415                                                      sizeof(buffer)));
4416         else if (res)
4417                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4418                                res->bus, res->target, res->lun);
4419
4420         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4421         return len;
4422 }
4423
4424 static struct device_attribute ipr_resource_path_attr = {
4425         .attr = {
4426                 .name =         "resource_path",
4427                 .mode =         S_IRUGO,
4428         },
4429         .show = ipr_show_resource_path
4430 };
4431
4432 /**
4433  * ipr_show_device_id - Show the device_id for this device.
4434  * @dev:        device struct
4435  * @attr:       device attribute structure
4436  * @buf:        buffer
4437  *
4438  * Return value:
4439  *      number of bytes printed to buffer
4440  **/
4441 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4442 {
4443         struct scsi_device *sdev = to_scsi_device(dev);
4444         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4445         struct ipr_resource_entry *res;
4446         unsigned long lock_flags = 0;
4447         ssize_t len = -ENXIO;
4448
4449         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4450         res = (struct ipr_resource_entry *)sdev->hostdata;
4451         if (res && ioa_cfg->sis64)
4452                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4453         else if (res)
4454                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4455
4456         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4457         return len;
4458 }
4459
4460 static struct device_attribute ipr_device_id_attr = {
4461         .attr = {
4462                 .name =         "device_id",
4463                 .mode =         S_IRUGO,
4464         },
4465         .show = ipr_show_device_id
4466 };
4467
4468 /**
4469  * ipr_show_resource_type - Show the resource type for this device.
4470  * @dev:        device struct
4471  * @attr:       device attribute structure
4472  * @buf:        buffer
4473  *
4474  * Return value:
4475  *      number of bytes printed to buffer
4476  **/
4477 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4478 {
4479         struct scsi_device *sdev = to_scsi_device(dev);
4480         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4481         struct ipr_resource_entry *res;
4482         unsigned long lock_flags = 0;
4483         ssize_t len = -ENXIO;
4484
4485         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4486         res = (struct ipr_resource_entry *)sdev->hostdata;
4487
4488         if (res)
4489                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4490
4491         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4492         return len;
4493 }
4494
4495 static struct device_attribute ipr_resource_type_attr = {
4496         .attr = {
4497                 .name =         "resource_type",
4498                 .mode =         S_IRUGO,
4499         },
4500         .show = ipr_show_resource_type
4501 };
4502
4503 /**
 * ipr_show_raw_mode - Show the device's raw mode
 * @dev:        class device struct
 * @attr:       device attribute structure
 * @buf:        buffer
4507  *
4508  * Return value:
4509  *      number of bytes printed to buffer
4510  **/
4511 static ssize_t ipr_show_raw_mode(struct device *dev,
4512                                  struct device_attribute *attr, char *buf)
4513 {
4514         struct scsi_device *sdev = to_scsi_device(dev);
4515         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4516         struct ipr_resource_entry *res;
4517         unsigned long lock_flags = 0;
4518         ssize_t len;
4519
4520         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4521         res = (struct ipr_resource_entry *)sdev->hostdata;
4522         if (res)
4523                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4524         else
4525                 len = -ENXIO;
4526         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4527         return len;
4528 }
4529
4530 /**
 * ipr_store_raw_mode - Change the device's raw mode
 * @dev:        class device struct
 * @attr:       device attribute structure
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes used on success / other on failure
4537  **/
4538 static ssize_t ipr_store_raw_mode(struct device *dev,
4539                                   struct device_attribute *attr,
4540                                   const char *buf, size_t count)
4541 {
4542         struct scsi_device *sdev = to_scsi_device(dev);
4543         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4544         struct ipr_resource_entry *res;
4545         unsigned long lock_flags = 0;
4546         ssize_t len;
4547
4548         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4549         res = (struct ipr_resource_entry *)sdev->hostdata;
4550         if (res) {
4551                 if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
4552                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4553                         len = strlen(buf);
4554                         if (res->sdev)
4555                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4556                                         res->raw_mode ? "enabled" : "disabled");
4557                 } else
4558                         len = -EINVAL;
4559         } else
4560                 len = -ENXIO;
4561         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4562         return len;
4563 }
4564
4565 static struct device_attribute ipr_raw_mode_attr = {
4566         .attr = {
4567                 .name =         "raw_mode",
4568                 .mode =         S_IRUGO | S_IWUSR,
4569         },
4570         .show = ipr_show_raw_mode,
4571         .store = ipr_store_raw_mode
4572 };
4573
4574 static struct device_attribute *ipr_dev_attrs[] = {
4575         &ipr_adapter_handle_attr,
4576         &ipr_resource_path_attr,
4577         &ipr_device_id_attr,
4578         &ipr_resource_type_attr,
4579         &ipr_raw_mode_attr,
4580         NULL,
4581 };
4582
4583 /**
4584  * ipr_biosparam - Return the HSC mapping
4585  * @sdev:                       scsi device struct
4586  * @block_device:       block device pointer
4587  * @capacity:           capacity of the device
4588  * @parm:                       Array containing returned HSC values.
4589  *
4590  * This function generates the HSC parms that fdisk uses.
4591  * We want to make sure we return something that places partitions
4592  * on 4k boundaries for best performance with the IOA.
4593  *
4594  * Return value:
4595  *      0 on success
4596  **/
4597 static int ipr_biosparam(struct scsi_device *sdev,
4598                          struct block_device *block_device,
4599                          sector_t capacity, int *parm)
4600 {
4601         int heads, sectors;
4602         sector_t cylinders;
4603
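        /* Report 128 heads x 32 sectors: 4096 sectors (2 MiB with
         * 512-byte sectors) per cylinder, so cylinder-aligned partitions
         * also land on 4k boundaries.
         */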
4604         heads = 128;
4605         sectors = 32;
4606
4607         cylinders = capacity;
4608         sector_div(cylinders, (128 * 32));
4609
4610         /* return result */
4611         parm[0] = heads;
4612         parm[1] = sectors;
4613         parm[2] = cylinders;
4614
4615         return 0;
4616 }
4617
4618 /**
4619  * ipr_find_starget - Find target based on bus/target.
4620  * @starget:    scsi target struct
4621  *
4622  * Return value:
4623  *      resource entry pointer if found / NULL if not found
4624  **/
4625 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4626 {
4627         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4628         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4629         struct ipr_resource_entry *res;
4630
4631         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4632                 if ((res->bus == starget->channel) &&
4633                     (res->target == starget->id)) {
4634                         return res;
4635                 }
4636         }
4637
4638         return NULL;
4639 }
4640
4641 static struct ata_port_info sata_port_info;
4642
4643 /**
4644  * ipr_target_alloc - Prepare for commands to a SCSI target
4645  * @starget:    scsi target struct
4646  *
4647  * If the device is a SATA device, this function allocates an
4648  * ATA port with libata, else it does nothing.
4649  *
4650  * Return value:
4651  *      0 on success / non-0 on failure
4652  **/
4653 static int ipr_target_alloc(struct scsi_target *starget)
4654 {
4655         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4656         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4657         struct ipr_sata_port *sata_port;
4658         struct ata_port *ap;
4659         struct ipr_resource_entry *res;
4660         unsigned long lock_flags;
4661
4662         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4663         res = ipr_find_starget(starget);
4664         starget->hostdata = NULL;
4665
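        /* The host lock must be dropped across the GFP_KERNEL and
         * libata port allocations below, since both may sleep.
         */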
4666         if (res && ipr_is_gata(res)) {
4667                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4668                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4669                 if (!sata_port)
4670                         return -ENOMEM;
4671
4672                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4673                 if (ap) {
4674                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4675                         sata_port->ioa_cfg = ioa_cfg;
4676                         sata_port->ap = ap;
4677                         sata_port->res = res;
4678
4679                         res->sata_port = sata_port;
4680                         ap->private_data = sata_port;
4681                         starget->hostdata = sata_port;
4682                 } else {
4683                         kfree(sata_port);
4684                         return -ENOMEM;
4685                 }
4686         }
4687         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4688
4689         return 0;
4690 }
4691
4692 /**
4693  * ipr_target_destroy - Destroy a SCSI target
4694  * @starget:    scsi target struct
4695  *
4696  * If the device was a SATA device, this function frees the libata
4697  * ATA port, else it does nothing.
4698  *
4699  **/
4700 static void ipr_target_destroy(struct scsi_target *starget)
4701 {
4702         struct ipr_sata_port *sata_port = starget->hostdata;
4703         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4704         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4705
4706         if (ioa_cfg->sis64) {
4707                 if (!ipr_find_starget(starget)) {
4708                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4709                                 clear_bit(starget->id, ioa_cfg->array_ids);
4710                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4711                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4712                         else if (starget->channel == 0)
4713                                 clear_bit(starget->id, ioa_cfg->target_ids);
4714                 }
4715         }
4716
4717         if (sata_port) {
4718                 starget->hostdata = NULL;
4719                 ata_sas_port_destroy(sata_port->ap);
4720                 kfree(sata_port);
4721         }
4722 }
4723
4724 /**
4725  * ipr_find_sdev - Find device based on bus/target/lun.
4726  * @sdev:       scsi device struct
4727  *
4728  * Return value:
4729  *      resource entry pointer if found / NULL if not found
4730  **/
4731 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4732 {
4733         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4734         struct ipr_resource_entry *res;
4735
4736         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4737                 if ((res->bus == sdev->channel) &&
4738                     (res->target == sdev->id) &&
4739                     (res->lun == sdev->lun))
4740                         return res;
4741         }
4742
4743         return NULL;
4744 }
4745
4746 /**
4747  * ipr_slave_destroy - Unconfigure a SCSI device
4748  * @sdev:       scsi device struct
4749  *
4750  * Return value:
4751  *      nothing
4752  **/
4753 static void ipr_slave_destroy(struct scsi_device *sdev)
4754 {
4755         struct ipr_resource_entry *res;
4756         struct ipr_ioa_cfg *ioa_cfg;
4757         unsigned long lock_flags = 0;
4758
4759         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4760
4761         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4762         res = (struct ipr_resource_entry *) sdev->hostdata;
4763         if (res) {
4764                 if (res->sata_port)
4765                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4766                 sdev->hostdata = NULL;
4767                 res->sdev = NULL;
4768                 res->sata_port = NULL;
4769         }
4770         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4771 }
4772
4773 /**
4774  * ipr_slave_configure - Configure a SCSI device
4775  * @sdev:       scsi device struct
4776  *
4777  * This function configures the specified scsi device.
4778  *
4779  * Return value:
4780  *      0 on success
4781  **/
4782 static int ipr_slave_configure(struct scsi_device *sdev)
4783 {
4784         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4785         struct ipr_resource_entry *res;
4786         struct ata_port *ap = NULL;
4787         unsigned long lock_flags = 0;
4788         char buffer[IPR_MAX_RES_PATH_LENGTH];
4789
4790         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4791         res = sdev->hostdata;
4792         if (res) {
4793                 if (ipr_is_af_dasd_device(res))
4794                         sdev->type = TYPE_RAID;
4795                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4796                         sdev->scsi_level = 4;
4797                         sdev->no_uld_attach = 1;
4798                 }
4799                 if (ipr_is_vset_device(res)) {
4800                         sdev->scsi_level = SCSI_SPC_3;
4801                         blk_queue_rq_timeout(sdev->request_queue,
4802                                              IPR_VSET_RW_TIMEOUT);
4803                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4804                 }
4805                 if (ipr_is_gata(res) && res->sata_port)
4806                         ap = res->sata_port->ap;
4807                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4808
4809                 if (ap) {
4810                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4811                         ata_sas_slave_configure(sdev, ap);
4812                 }
4813
4814                 if (ioa_cfg->sis64)
4815                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4816                                     ipr_format_res_path(ioa_cfg,
4817                                 res->res_path, buffer, sizeof(buffer)));
4818                 return 0;
4819         }
4820         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4821         return 0;
4822 }
4823
4824 /**
4825  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4826  * @sdev:       scsi device struct
4827  *
4828  * This function initializes an ATA port so that future commands
4829  * sent through queuecommand will work.
4830  *
4831  * Return value:
4832  *      0 on success
4833  **/
4834 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4835 {
4836         struct ipr_sata_port *sata_port = NULL;
4837         int rc = -ENXIO;
4838
4839         ENTER;
4840         if (sdev->sdev_target)
4841                 sata_port = sdev->sdev_target->hostdata;
4842         if (sata_port) {
4843                 rc = ata_sas_port_init(sata_port->ap);
4844                 if (rc == 0)
4845                         rc = ata_sas_sync_probe(sata_port->ap);
4846         }
4847
4848         if (rc)
4849                 ipr_slave_destroy(sdev);
4850
4851         LEAVE;
4852         return rc;
4853 }
4854
4855 /**
4856  * ipr_slave_alloc - Prepare for commands to a device.
4857  * @sdev:       scsi device struct
4858  *
4859  * This function saves a pointer to the resource entry
4860  * in the scsi device struct if the device exists. We
4861  * can then use this pointer in ipr_queuecommand when
4862  * handling new commands.
4863  *
4864  * Return value:
4865  *      0 on success / -ENXIO if device does not exist
4866  **/
4867 static int ipr_slave_alloc(struct scsi_device *sdev)
4868 {
4869         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4870         struct ipr_resource_entry *res;
4871         unsigned long lock_flags;
4872         int rc = -ENXIO;
4873
4874         sdev->hostdata = NULL;
4875
4876         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4877
4878         res = ipr_find_sdev(sdev);
4879         if (res) {
4880                 res->sdev = sdev;
4881                 res->add_to_ml = 0;
4882                 res->in_erp = 0;
4883                 sdev->hostdata = res;
4884                 if (!ipr_is_naca_model(res))
4885                         res->needs_sync_complete = 1;
4886                 rc = 0;
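                /* ATA device probing may sleep, so drop the host lock
                 * before handing off to the ATA slave alloc path.
                 */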
4887                 if (ipr_is_gata(res)) {
4888                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4889                         return ipr_ata_slave_alloc(sdev);
4890                 }
4891         }
4892
4893         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4894
4895         return rc;
4896 }
4897
4898 /**
4899  * ipr_match_lun - Match function for specified LUN
4900  * @ipr_cmd:    ipr command struct
4901  * @device:             device to match (sdev)
4902  *
4903  * Returns:
4904  *      1 if command matches sdev / 0 if command does not match sdev
4905  **/
4906 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4907 {
4908         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4909                 return 1;
4910         return 0;
4911 }
4912
4913 /**
4914  * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:    ioa config struct
4916  * @device:             device to match (sdev)
4917  * @match:              match function to use
4918  *
4919  * Returns:
4920  *      SUCCESS / FAILED
4921  **/
4922 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4923                             int (*match)(struct ipr_cmnd *, void *))
4924 {
4925         struct ipr_cmnd *ipr_cmd;
4926         int wait;
4927         unsigned long flags;
4928         struct ipr_hrr_queue *hrrq;
4929         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4930         DECLARE_COMPLETION_ONSTACK(comp);
4931
4932         ENTER;
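        /* Attach a completion to every matching command still pending
         * on any HRRQ, then sleep until they all complete or the
         * timeout expires. Repeat in case new matching commands were
         * queued while we slept; on timeout, detach the completions and
         * fail if any matches remain.
         */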
4933         do {
4934                 wait = 0;
4935
4936                 for_each_hrrq(hrrq, ioa_cfg) {
4937                         spin_lock_irqsave(hrrq->lock, flags);
4938                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4939                                 if (match(ipr_cmd, device)) {
4940                                         ipr_cmd->eh_comp = &comp;
4941                                         wait++;
4942                                 }
4943                         }
4944                         spin_unlock_irqrestore(hrrq->lock, flags);
4945                 }
4946
4947                 if (wait) {
4948                         timeout = wait_for_completion_timeout(&comp, timeout);
4949
4950                         if (!timeout) {
4951                                 wait = 0;
4952
4953                                 for_each_hrrq(hrrq, ioa_cfg) {
4954                                         spin_lock_irqsave(hrrq->lock, flags);
4955                                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4956                                                 if (match(ipr_cmd, device)) {
4957                                                         ipr_cmd->eh_comp = NULL;
4958                                                         wait++;
4959                                                 }
4960                                         }
4961                                         spin_unlock_irqrestore(hrrq->lock, flags);
4962                                 }
4963
4964                                 if (wait)
4965                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4966                                 LEAVE;
4967                                 return wait ? FAILED : SUCCESS;
4968                         }
4969                 }
4970         } while (wait);
4971
4972         LEAVE;
4973         return SUCCESS;
4974 }
4975
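/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/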
4976 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4977 {
4978         struct ipr_ioa_cfg *ioa_cfg;
4979         unsigned long lock_flags = 0;
4980         int rc = SUCCESS;
4981
4982         ENTER;
4983         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4984         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4985
4986         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4987                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4988                 dev_err(&ioa_cfg->pdev->dev,
4989                         "Adapter being reset as a result of error recovery.\n");
4990
4991                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4992                         ioa_cfg->sdt_state = GET_DUMP;
4993         }
4994
4995         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4996         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4997         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4998
        /* If we got hit with a host reset while we were already resetting
         * the adapter for some reason, and that reset failed, the adapter
         * is now dead and this host reset must fail as well.
         */
5001         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5002                 ipr_trace;
5003                 rc = FAILED;
5004         }
5005
5006         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5007         LEAVE;
5008         return rc;
5009 }
5010
5011 /**
5012  * ipr_device_reset - Reset the device
5013  * @ioa_cfg:    ioa config struct
5014  * @res:                resource entry struct
5015  *
5016  * This function issues a device reset to the affected device.
5017  * If the device is a SCSI device, a LUN reset will be sent
5018  * to the device first. If that does not work, a target reset
5019  * will be sent. If the device is a SATA device, a PHY reset will
5020  * be sent.
5021  *
5022  * Return value:
5023  *      0 on success / non-zero on failure
5024  **/
5025 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5026                             struct ipr_resource_entry *res)
5027 {
5028         struct ipr_cmnd *ipr_cmd;
5029         struct ipr_ioarcb *ioarcb;
5030         struct ipr_cmd_pkt *cmd_pkt;
5031         struct ipr_ioarcb_ata_regs *regs;
5032         u32 ioasc;
5033
5034         ENTER;
5035         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5036         ioarcb = &ipr_cmd->ioarcb;
5037         cmd_pkt = &ioarcb->cmd_pkt;
5038
5039         if (ipr_cmd->ioa_cfg->sis64) {
5040                 regs = &ipr_cmd->i.ata_ioadl.regs;
5041                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5042         } else
5043                 regs = &ioarcb->u.add_data.u.regs;
5044
5045         ioarcb->res_handle = res->res_handle;
5046         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5047         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5048         if (ipr_is_gata(res)) {
5049                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5050                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5051                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5052         }
5053
5054         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5055         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5056         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5057         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5058                 if (ipr_cmd->ioa_cfg->sis64)
5059                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5060                                sizeof(struct ipr_ioasa_gata));
5061                 else
5062                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5063                                sizeof(struct ipr_ioasa_gata));
5064         }
5065
5066         LEAVE;
5067         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5068 }
5069
5070 /**
5071  * ipr_sata_reset - Reset the SATA port
5072  * @link:       SATA link to reset
 * @classes:    class of the attached device
 * @deadline:   deadline for the reset, in jiffies
5074  *
5075  * This function issues a SATA phy reset to the affected ATA link.
5076  *
5077  * Return value:
5078  *      0 on success / non-zero on failure
5079  **/
5080 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5081                                 unsigned long deadline)
5082 {
5083         struct ipr_sata_port *sata_port = link->ap->private_data;
5084         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5085         struct ipr_resource_entry *res;
5086         unsigned long lock_flags = 0;
5087         int rc = -ENXIO;
5088
5089         ENTER;
5090         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
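        /* If an adapter reset is in progress, drop the lock and wait
         * for it to finish before issuing the device reset.
         */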
5091         while (ioa_cfg->in_reset_reload) {
5092                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5093                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5094                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5095         }
5096
5097         res = sata_port->res;
5098         if (res) {
5099                 rc = ipr_device_reset(ioa_cfg, res);
5100                 *classes = res->ata_class;
5101         }
5102
5103         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5104         LEAVE;
5105         return rc;
5106 }
5107
5108 /**
 * __ipr_eh_dev_reset - Reset the device
5110  * @scsi_cmd:   scsi command struct
5111  *
5112  * This function issues a device reset to the affected device.
5113  * A LUN reset will be sent to the device first. If that does
5114  * not work, a target reset will be sent.
5115  *
5116  * Return value:
5117  *      SUCCESS / FAILED
5118  **/
5119 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5120 {
5121         struct ipr_cmnd *ipr_cmd;
5122         struct ipr_ioa_cfg *ioa_cfg;
5123         struct ipr_resource_entry *res;
5124         struct ata_port *ap;
5125         int rc = 0;
5126         struct ipr_hrr_queue *hrrq;
5127
5128         ENTER;
5129         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5130         res = scsi_cmd->device->hostdata;
5131
5132         if (!res)
5133                 return FAILED;
5134
5135         /*
5136          * If we are currently going through reset/reload, return failed. This will force the
5137          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5138          * reset to complete
5139          */
5140         if (ioa_cfg->in_reset_reload)
5141                 return FAILED;
5142         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5143                 return FAILED;
5144
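        /* Point the done handler of every command outstanding to this
         * device at the error-handling completion routines, and flag
         * any active ATA commands as failed so libata's error handler
         * will process them.
         */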
5145         for_each_hrrq(hrrq, ioa_cfg) {
5146                 spin_lock(&hrrq->_lock);
5147                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5148                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5149                                 if (ipr_cmd->scsi_cmd)
5150                                         ipr_cmd->done = ipr_scsi_eh_done;
5151                                 if (ipr_cmd->qc)
5152                                         ipr_cmd->done = ipr_sata_eh_done;
5153                                 if (ipr_cmd->qc &&
5154                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5155                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5156                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5157                                 }
5158                         }
5159                 }
5160                 spin_unlock(&hrrq->_lock);
5161         }
5162         res->resetting_device = 1;
5163         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5164
5165         if (ipr_is_gata(res) && res->sata_port) {
5166                 ap = res->sata_port->ap;
5167                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5168                 ata_std_error_handler(ap);
5169                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5170
5171                 for_each_hrrq(hrrq, ioa_cfg) {
5172                         spin_lock(&hrrq->_lock);
5173                         list_for_each_entry(ipr_cmd,
5174                                             &hrrq->hrrq_pending_q, queue) {
5175                                 if (ipr_cmd->ioarcb.res_handle ==
5176                                     res->res_handle) {
5177                                         rc = -EIO;
5178                                         break;
5179                                 }
5180                         }
5181                         spin_unlock(&hrrq->_lock);
5182                 }
5183         } else
5184                 rc = ipr_device_reset(ioa_cfg, res);
5185         res->resetting_device = 0;
5186         res->reset_occurred = 1;
5187
5188         LEAVE;
5189         return rc ? FAILED : SUCCESS;
5190 }
5191
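/**
 * ipr_eh_dev_reset - Reset the device
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/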
5192 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5193 {
5194         int rc;
5195         struct ipr_ioa_cfg *ioa_cfg;
5196
5197         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5198
5199         spin_lock_irq(cmd->device->host->host_lock);
5200         rc = __ipr_eh_dev_reset(cmd);
5201         spin_unlock_irq(cmd->device->host->host_lock);
5202
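        /* With the host lock released, wait for any remaining
         * outstanding ops to this device before reporting the result.
         */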
5203         if (rc == SUCCESS)
5204                 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5205
5206         return rc;
5207 }
5208
5209 /**
5210  * ipr_bus_reset_done - Op done function for bus reset.
5211  * @ipr_cmd:    ipr command struct
5212  *
5213  * This function is the op done function for a bus reset
5214  *
5215  * Return value:
5216  *      none
5217  **/
5218 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5219 {
5220         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5221         struct ipr_resource_entry *res;
5222
5223         ENTER;
5224         if (!ioa_cfg->sis64)
5225                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5226                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5227                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5228                                 break;
5229                         }
5230                 }
5231
5232         /*
5233          * If abort has not completed, indicate the reset has, else call the
5234          * abort's done function to wake the sleeping eh thread
5235          */
5236         if (ipr_cmd->sibling->sibling)
5237                 ipr_cmd->sibling->sibling = NULL;
5238         else
5239                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5240
5241         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5242         LEAVE;
5243 }
5244
5245 /**
5246  * ipr_abort_timeout - An abort task has timed out
5247  * @ipr_cmd:    ipr command struct
5248  *
5249  * This function handles when an abort task times out. If this
5250  * happens we issue a bus reset since we have resources tied
5251  * up that must be freed before returning to the midlayer.
5252  *
5253  * Return value:
5254  *      none
5255  **/
5256 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5257 {
5258         struct ipr_cmnd *reset_cmd;
5259         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5260         struct ipr_cmd_pkt *cmd_pkt;
5261         unsigned long lock_flags = 0;
5262
5263         ENTER;
5264         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5265         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5266                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5267                 return;
5268         }
5269
5270         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5271         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5272         ipr_cmd->sibling = reset_cmd;
5273         reset_cmd->sibling = ipr_cmd;
5274         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5275         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5276         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5277         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5278         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5279
5280         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5281         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5282         LEAVE;
5283 }
5284
5285 /**
5286  * ipr_cancel_op - Cancel specified op
5287  * @scsi_cmd:   scsi command struct
5288  *
5289  * This function cancels specified op.
5290  *
5291  * Return value:
5292  *      SUCCESS / FAILED
5293  **/
5294 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5295 {
5296         struct ipr_cmnd *ipr_cmd;
5297         struct ipr_ioa_cfg *ioa_cfg;
5298         struct ipr_resource_entry *res;
5299         struct ipr_cmd_pkt *cmd_pkt;
5300         u32 ioasc, int_reg;
5301         int op_found = 0;
5302         struct ipr_hrr_queue *hrrq;
5303
5304         ENTER;
5305         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5306         res = scsi_cmd->device->hostdata;
5307
5308         /* If we are currently going through reset/reload, return failed.
5309          * This will force the mid-layer to call ipr_eh_host_reset,
5310          * which will then go to sleep and wait for the reset to complete
5311          */
5312         if (ioa_cfg->in_reset_reload ||
5313             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5314                 return FAILED;
5315         if (!res)
5316                 return FAILED;
5317
5318         /*
5319          * If we are aborting a timed out op, chances are that the timeout was caused
5320          * by a still not detected EEH error. In such cases, reading a register will
5321          * trigger the EEH recovery infrastructure.
5322          */
5323         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5324
5325         if (!ipr_is_gscsi(res))
5326                 return FAILED;
5327
5328         for_each_hrrq(hrrq, ioa_cfg) {
5329                 spin_lock(&hrrq->_lock);
5330                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5331                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5332                                 ipr_cmd->done = ipr_scsi_eh_done;
5333                                 op_found = 1;
5334                                 break;
5335                         }
5336                 }
5337                 spin_unlock(&hrrq->_lock);
5338         }
5339
5340         if (!op_found)
5341                 return SUCCESS;
5342
5343         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5344         ipr_cmd->ioarcb.res_handle = res->res_handle;
5345         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5346         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5347         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5348         ipr_cmd->u.sdev = scsi_cmd->device;
5349
5350         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5351                     scsi_cmd->cmnd[0]);
5352         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5353         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5354
5355         /*
5356          * If the abort task timed out and we sent a bus reset, we will get
         * one of the following responses to the abort
5358          */
5359         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5360                 ioasc = 0;
5361                 ipr_trace;
5362         }
5363
5364         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5365         if (!ipr_is_naca_model(res))
5366                 res->needs_sync_complete = 1;
5367
5368         LEAVE;
5369         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5370 }
5371
5372 /**
 * ipr_scan_finished - Report whether the initial device scan is complete
 * @shost:              scsi host struct
 * @elapsed_time:       elapsed time of the scan, in jiffies
5375  *
5376  * Return value:
5377  *      0 if scan in progress / 1 if scan is complete
5378  **/
5379 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5380 {
5381         unsigned long lock_flags;
5382         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5383         int rc = 0;
5384
5385         spin_lock_irqsave(shost->host_lock, lock_flags);
5386         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5387                 rc = 1;
5388         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5389                 rc = 1;
5390         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5391         return rc;
5392 }
5393
5394 /**
 * ipr_eh_abort - Abort a single op
5396  * @scsi_cmd:   scsi command struct
5397  *
5398  * Return value:
5399  *      SUCCESS / FAILED
5400  **/
5401 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5402 {
5403         unsigned long flags;
5404         int rc;
5405         struct ipr_ioa_cfg *ioa_cfg;
5406
5407         ENTER;
5408
5409         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5410
5411         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5412         rc = ipr_cancel_op(scsi_cmd);
5413         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5414
5415         if (rc == SUCCESS)
5416                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5417         LEAVE;
5418         return rc;
5419 }
5420
5421 /**
5422  * ipr_handle_other_interrupt - Handle "other" interrupts
5423  * @ioa_cfg:    ioa config struct
5424  * @int_reg:    interrupt register
5425  *
5426  * Return value:
5427  *      IRQ_NONE / IRQ_HANDLED
5428  **/
5429 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5430                                               u32 int_reg)
5431 {
5432         irqreturn_t rc = IRQ_HANDLED;
5433         u32 int_mask_reg;
5434
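        /* Discard any interrupt bits that are currently masked off
         * before deciding how to handle this interrupt.
         */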
5435         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5436         int_reg &= ~int_mask_reg;
5437
        /* If no operational interrupt is pending, there is nothing to do,
         * except on SIS64 adapters, where a stage change interrupt during
         * IPL must also be checked for and handled.
         */
5441         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5442                 if (ioa_cfg->sis64) {
5443                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5444                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5445                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5446
5447                                 /* clear stage change */
5448                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5449                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5450                                 list_del(&ioa_cfg->reset_cmd->queue);
5451                                 del_timer(&ioa_cfg->reset_cmd->timer);
5452                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5453                                 return IRQ_HANDLED;
5454                         }
5455                 }
5456
5457                 return IRQ_NONE;
5458         }
5459
5460         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5461                 /* Mask the interrupt */
5462                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5463                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5464
5465                 list_del(&ioa_cfg->reset_cmd->queue);
5466                 del_timer(&ioa_cfg->reset_cmd->timer);
5467                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5468         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5469                 if (ioa_cfg->clear_isr) {
5470                         if (ipr_debug && printk_ratelimit())
5471                                 dev_err(&ioa_cfg->pdev->dev,
5472                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5473                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5474                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5475                         return IRQ_NONE;
5476                 }
5477         } else {
5478                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5479                         ioa_cfg->ioa_unit_checked = 1;
5480                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5481                         dev_err(&ioa_cfg->pdev->dev,
5482                                 "No Host RRQ. 0x%08X\n", int_reg);
5483                 else
5484                         dev_err(&ioa_cfg->pdev->dev,
5485                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5486
5487                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5488                         ioa_cfg->sdt_state = GET_DUMP;
5489
5490                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5491                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5492         }
5493
5494         return rc;
5495 }
5496
5497 /**
5498  * ipr_isr_eh - Interrupt service routine error handler
5499  * @ioa_cfg:    ioa config struct
 * @msg:        message to log
 * @number:     number to log with the message
5501  *
5502  * Return value:
5503  *      none
5504  **/
5505 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5506 {
5507         ioa_cfg->errors_logged++;
5508         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5509
5510         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5511                 ioa_cfg->sdt_state = GET_DUMP;
5512
5513         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5514 }
5515
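/**
 * ipr_process_hrrq - Process responses on a host RRQ
 * @hrr_queue:  HRR queue to process
 * @budget:     max number of responses to process, or -1 for no limit
 * @doneq:      list to move completed commands to
 *
 * Return value:
 *      number of responses processed
 **/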
5516 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5517                                                 struct list_head *doneq)
5518 {
5519         u32 ioasc;
5520         u16 cmd_index;
5521         struct ipr_cmnd *ipr_cmd;
5522         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5523         int num_hrrq = 0;
5524
5525         /* If interrupts are disabled, ignore the interrupt */
5526         if (!hrr_queue->allow_interrupts)
5527                 return 0;
5528
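        /* The adapter alternates the toggle bit it writes on each pass
         * through the circular host RRQ, so an entry is new only while
         * its toggle bit matches ours; when we wrap back to the start
         * of the queue we flip our copy to stay in step.
         */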
5529         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5530                hrr_queue->toggle_bit) {
5531
5532                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5533                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5534                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5535
5536                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5537                              cmd_index < hrr_queue->min_cmd_id)) {
5538                         ipr_isr_eh(ioa_cfg,
5539                                 "Invalid response handle from IOA: ",
5540                                 cmd_index);
5541                         break;
5542                 }
5543
5544                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5545                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5546
5547                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5548
5549                 list_move_tail(&ipr_cmd->queue, doneq);
5550
5551                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5552                         hrr_queue->hrrq_curr++;
5553                 } else {
5554                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5555                         hrr_queue->toggle_bit ^= 1u;
5556                 }
5557                 num_hrrq++;
5558                 if (budget > 0 && num_hrrq >= budget)
5559                         break;
5560         }
5561
5562         return num_hrrq;
5563 }
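
/*
 * How the HRRQ is consumed (an illustrative sketch, not driver code):
 * the adapter posts response handles into a circular buffer of __be32
 * entries and flips IPR_HRRQ_TOGGLE_BIT each time it wraps, so an
 * entry is new only while its toggle bit matches the host's expected
 * value.  Stripped of validation and budgeting, the loop above is:
 *
 *      while ((be32_to_cpu(*curr) & IPR_HRRQ_TOGGLE_BIT) == toggle) {
 *              consume(curr);
 *              if (curr < end)
 *                      curr++;
 *              else {
 *                      curr = start;
 *                      toggle ^= 1u;   (expect the flipped bit after wrap)
 *              }
 *      }
 */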
5564
5565 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5566 {
5567         struct ipr_ioa_cfg *ioa_cfg;
5568         struct ipr_hrr_queue *hrrq;
5569         struct ipr_cmnd *ipr_cmd, *temp;
5570         unsigned long hrrq_flags;
5571         int completed_ops;
5572         LIST_HEAD(doneq);
5573
5574         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5575         ioa_cfg = hrrq->ioa_cfg;
5576
5577         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5578         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5579
5580         if (completed_ops < budget)
5581                 blk_iopoll_complete(iop);
5582         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5583
5584         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5585                 list_del(&ipr_cmd->queue);
5586                 del_timer(&ipr_cmd->timer);
5587                 ipr_cmd->fast_done(ipr_cmd);
5588         }
5589
5590         return completed_ops;
5591 }
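
/*
 * Each HRRQ owns the blk_iopoll instance polled above; it is armed
 * during adapter setup elsewhere in this driver (a minimal sketch of
 * that pairing, assuming iopoll_weight has already been validated):
 *
 *      blk_iopoll_init(&hrrq->iopoll, ioa_cfg->iopoll_weight, ipr_iopoll);
 *      blk_iopoll_enable(&hrrq->iopoll);
 *
 * With the poller enabled, the interrupt handler merely schedules it
 * (see ipr_isr_mhrrq() below) and completions drain in softirq
 * context; blk_iopoll_complete() above allows the poller to be
 * scheduled again once a poll round finishes under budget.
 */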
5592
5593 /**
5594  * ipr_isr - Interrupt service routine
5595  * @irq:        irq number
5596  * @devp:       pointer to hrrq struct
5597  *
5598  * Return value:
5599  *      IRQ_NONE / IRQ_HANDLED
5600  **/
5601 static irqreturn_t ipr_isr(int irq, void *devp)
5602 {
5603         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5604         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5605         unsigned long hrrq_flags = 0;
5606         u32 int_reg = 0;
5607         int num_hrrq = 0;
5608         int irq_none = 0;
5609         struct ipr_cmnd *ipr_cmd, *temp;
5610         irqreturn_t rc = IRQ_NONE;
5611         LIST_HEAD(doneq);
5612
5613         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5614         /* If interrupts are disabled, ignore the interrupt */
5615         if (!hrrq->allow_interrupts) {
5616                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5617                 return IRQ_NONE;
5618         }
5619
5620         while (1) {
5621                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5622                 rc = IRQ_HANDLED;
5623
5624                         if (!ioa_cfg->clear_isr)
5625                                 break;
5626
5627                         /* Clear the PCI interrupt */
5628                         num_hrrq = 0;
5629                         do {
5630                                 writel(IPR_PCII_HRRQ_UPDATED,
5631                                      ioa_cfg->regs.clr_interrupt_reg32);
5632                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5633                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5634                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5635
5636                 } else if (rc == IRQ_NONE && irq_none == 0) {
5637                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5638                         irq_none++;
5639                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5640                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5641                         ipr_isr_eh(ioa_cfg,
5642                                 "Error clearing HRRQ:", num_hrrq);
5643                         rc = IRQ_HANDLED;
5644                         break;
5645                 } else
5646                         break;
5647         }
5648
5649         if (unlikely(rc == IRQ_NONE))
5650                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5651
5652         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5653         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5654                 list_del(&ipr_cmd->queue);
5655                 del_timer(&ipr_cmd->timer);
5656                 ipr_cmd->fast_done(ipr_cmd);
5657         }
5658         return rc;
5659 }
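
/*
 * Note on the clear loop in ipr_isr(): on adapters that require
 * explicit interrupt clearing (ioa_cfg->clear_isr), HRRQ_UPDATED can
 * reassert between the write to the clear register and the readback,
 * so the handler retries up to IPR_MAX_HRRQ_RETRIES times before
 * escalating through ipr_isr_eh(), which forces an adapter reset.
 */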
5660
5661 /**
5662  * ipr_isr_mhrrq - Interrupt service routine for secondary HRR queues
5663  * @irq:        irq number
5664  * @devp:       pointer to hrrq struct
5665  *
5666  * Return value:
5667  *      IRQ_NONE / IRQ_HANDLED
5668  **/
5669 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5670 {
5671         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5672         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5673         unsigned long hrrq_flags = 0;
5674         struct ipr_cmnd *ipr_cmd, *temp;
5675         irqreturn_t rc = IRQ_NONE;
5676         LIST_HEAD(doneq);
5677
5678         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5679
5680         /* If interrupts are disabled, ignore the interrupt */
5681         if (!hrrq->allow_interrupts) {
5682                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5683                 return IRQ_NONE;
5684         }
5685
5686         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5687                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5688                        hrrq->toggle_bit) {
5689                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5690                                 blk_iopoll_sched(&hrrq->iopoll);
5691                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5692                         return IRQ_HANDLED;
5693                 }
5694         } else {
5695                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5696                         hrrq->toggle_bit)
5697
5698                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5699                                 rc = IRQ_HANDLED;
5700         }
5701
5702         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5703
5704         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5705                 list_del(&ipr_cmd->queue);
5706                 del_timer(&ipr_cmd->timer);
5707                 ipr_cmd->fast_done(ipr_cmd);
5708         }
5709         return rc;
5710 }
5711
5712 /**
5713  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5714  * @ioa_cfg:    ioa config struct
5715  * @ipr_cmd:    ipr command struct
5716  *
5717  * Return value:
5718  *      0 on success / -1 on failure
5719  **/
5720 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5721                              struct ipr_cmnd *ipr_cmd)
5722 {
5723         int i, nseg;
5724         struct scatterlist *sg;
5725         u32 length;
5726         u32 ioadl_flags = 0;
5727         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5728         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5729         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5730
5731         length = scsi_bufflen(scsi_cmd);
5732         if (!length)
5733                 return 0;
5734
5735         nseg = scsi_dma_map(scsi_cmd);
5736         if (nseg < 0) {
5737                 if (printk_ratelimit())
5738                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5739                 return -1;
5740         }
5741
5742         ipr_cmd->dma_use_sg = nseg;
5743
5744         ioarcb->data_transfer_length = cpu_to_be32(length);
5745         ioarcb->ioadl_len =
5746                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5747
5748         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5749                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5750                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5751         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5752                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5753
5754         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5755                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5756                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5757                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5758         }
5759
5760         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5761         return 0;
5762 }
5763
5764 /**
5765  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5766  * @ioa_cfg:    ioa config struct
5767  * @ipr_cmd:    ipr command struct
5768  *
5769  * Return value:
5770  *      0 on success / -1 on failure
5771  **/
5772 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5773                            struct ipr_cmnd *ipr_cmd)
5774 {
5775         int i, nseg;
5776         struct scatterlist *sg;
5777         u32 length;
5778         u32 ioadl_flags = 0;
5779         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5780         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5781         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5782
5783         length = scsi_bufflen(scsi_cmd);
5784         if (!length)
5785                 return 0;
5786
5787         nseg = scsi_dma_map(scsi_cmd);
5788         if (nseg < 0) {
5789                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5790                 return -1;
5791         }
5792
5793         ipr_cmd->dma_use_sg = nseg;
5794
5795         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5796                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5797                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5798                 ioarcb->data_transfer_length = cpu_to_be32(length);
5799                 ioarcb->ioadl_len =
5800                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5801         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5802                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5803                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5804                 ioarcb->read_ioadl_len =
5805                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5806         }
5807
5808         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5809                 ioadl = ioarcb->u.add_data.u.ioadl;
5810                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5811                                     offsetof(struct ipr_ioarcb, u.add_data));
5812                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5813         }
5814
5815         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5816                 ioadl[i].flags_and_data_len =
5817                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5818                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5819         }
5820
5821         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5822         return 0;
5823 }
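
/*
 * IOADL layout example (illustrative): a two-segment read built by
 * ipr_build_ioadl() above ends up as
 *
 *      ioadl[0].flags_and_data_len = IPR_IOADL_FLAGS_READ | len0;
 *      ioadl[0].address            = addr0;
 *      ioadl[1].flags_and_data_len = IPR_IOADL_FLAGS_READ |
 *                                    IPR_IOADL_FLAGS_LAST | len1;
 *      ioadl[1].address            = addr1;
 *
 * with all fields byte-swapped to big endian.  The sis64 format in
 * ipr_build_ioadl64() splits flags and length into separate 32-bit
 * words, uses 64-bit addresses, and always fills the write-side
 * length fields of the IOARCB regardless of transfer direction.
 */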
5824
5825 /**
5826  * ipr_erp_done - Process completion of ERP for a device
5827  * @ipr_cmd:            ipr command struct
5828  *
5829  * This function copies the sense buffer into the scsi_cmd
5830  * struct and pushes the scsi_done function.
5831  *
5832  * Return value:
5833  *      nothing
5834  **/
5835 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5836 {
5837         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5838         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5839         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5840
5841         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5842                 scsi_cmd->result |= (DID_ERROR << 16);
5843                 scmd_printk(KERN_ERR, scsi_cmd,
5844                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5845         } else {
5846                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5847                        SCSI_SENSE_BUFFERSIZE);
5848         }
5849
5850         if (res) {
5851                 if (!ipr_is_naca_model(res))
5852                         res->needs_sync_complete = 1;
5853                 res->in_erp = 0;
5854         }
5855         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5856         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5857         scsi_cmd->scsi_done(scsi_cmd);
5858 }
5859
5860 /**
5861  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5862  * @ipr_cmd:    ipr command struct
5863  *
5864  * Return value:
5865  *      none
5866  **/
5867 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5868 {
5869         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5870         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5871         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5872
5873         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5874         ioarcb->data_transfer_length = 0;
5875         ioarcb->read_data_transfer_length = 0;
5876         ioarcb->ioadl_len = 0;
5877         ioarcb->read_ioadl_len = 0;
5878         ioasa->hdr.ioasc = 0;
5879         ioasa->hdr.residual_data_len = 0;
5880
5881         if (ipr_cmd->ioa_cfg->sis64)
5882                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5883                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5884         else {
5885                 ioarcb->write_ioadl_addr =
5886                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5887                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5888         }
5889 }
5890
5891 /**
5892  * ipr_erp_request_sense - Send request sense to a device
5893  * @ipr_cmd:    ipr command struct
5894  *
5895  * This function sends a request sense to a device as a result
5896  * of a check condition.
5897  *
5898  * Return value:
5899  *      nothing
5900  **/
5901 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5902 {
5903         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5904         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5905
5906         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5907                 ipr_erp_done(ipr_cmd);
5908                 return;
5909         }
5910
5911         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5912
5913         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5914         cmd_pkt->cdb[0] = REQUEST_SENSE;
5915         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5916         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5917         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5918         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5919
5920         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5921                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5922
5923         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5924                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5925 }
5926
5927 /**
5928  * ipr_erp_cancel_all - Send cancel all to a device
5929  * @ipr_cmd:    ipr command struct
5930  *
5931  * This function sends a cancel all to a device to clear the
5932  * queue. If we are running TCQ on the device, QERR is set to 1,
5933  * which means all outstanding ops have been dropped on the floor.
5934  * Cancel all will return them to us.
5935  *
5936  * Return value:
5937  *      nothing
5938  **/
5939 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5940 {
5941         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5942         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5943         struct ipr_cmd_pkt *cmd_pkt;
5944
5945         res->in_erp = 1;
5946
5947         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5948
5949         if (!scsi_cmd->device->simple_tags) {
5950                 ipr_erp_request_sense(ipr_cmd);
5951                 return;
5952         }
5953
5954         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5955         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5956         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5957
5958         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5959                    IPR_CANCEL_ALL_TIMEOUT);
5960 }
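
/*
 * ERP flow for GPDD check conditions without usable autosense (see
 * ipr_erp_start() below):
 *
 *      ipr_erp_start()
 *        -> ipr_erp_cancel_all()       (only with simple_tags; QERR
 *                                       already dropped the queued ops)
 *          -> ipr_erp_request_sense()  (REQUEST SENSE to the device)
 *            -> ipr_erp_done()         (copy sense, complete scsi_cmd)
 *
 * Each stage recycles the same ipr_cmnd via
 * ipr_reinit_ipr_cmnd_for_erp(), so error recovery never needs an
 * extra command block.
 */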
5961
5962 /**
5963  * ipr_dump_ioasa - Dump contents of IOASA
5964  * @ioa_cfg:    ioa config struct
5965  * @ipr_cmd:    ipr command struct
5966  * @res:                resource entry struct
5967  *
5968  * This function is invoked by the interrupt handler when ops
5969  * fail. It will log the IOASA if appropriate. Only called
5970  * for GPDD ops.
5971  *
5972  * Return value:
5973  *      none
5974  **/
5975 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5976                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5977 {
5978         int i;
5979         u16 data_len;
5980         u32 ioasc, fd_ioasc;
5981         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5982         __be32 *ioasa_data = (__be32 *)ioasa;
5983         int error_index;
5984
5985         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5986         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5987
5988         if (0 == ioasc)
5989                 return;
5990
5991         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5992                 return;
5993
5994         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5995                 error_index = ipr_get_error(fd_ioasc);
5996         else
5997                 error_index = ipr_get_error(ioasc);
5998
5999         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6000                 /* Don't log an error if the IOA already logged one */
6001                 if (ioasa->hdr.ilid != 0)
6002                         return;
6003
6004                 if (!ipr_is_gscsi(res))
6005                         return;
6006
6007                 if (ipr_error_table[error_index].log_ioasa == 0)
6008                         return;
6009         }
6010
6011         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6012
6013         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6014         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6015                 data_len = sizeof(struct ipr_ioasa64);
6016         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6017                 data_len = sizeof(struct ipr_ioasa);
6018
6019         ipr_err("IOASA Dump:\n");
6020
6021         for (i = 0; i < data_len / 4; i += 4) {
6022                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6023                         be32_to_cpu(ioasa_data[i]),
6024                         be32_to_cpu(ioasa_data[i+1]),
6025                         be32_to_cpu(ioasa_data[i+2]),
6026                         be32_to_cpu(ioasa_data[i+3]));
6027         }
6028 }
6029
6030 /**
6031  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6032  * @ipr_cmd:    ipr command struct
6034  *
6035  * Return value:
6036  *      none
6037  **/
6038 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6039 {
6040         u32 failing_lba;
6041         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6042         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6043         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6044         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6045
6046         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6047
6048         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6049                 return;
6050
6051         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6052
6053         if (ipr_is_vset_device(res) &&
6054             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6055             ioasa->u.vset.failing_lba_hi != 0) {
6056                 sense_buf[0] = 0x72;
6057                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6058                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6059                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6060
6061                 sense_buf[7] = 12;
6062                 sense_buf[8] = 0;
6063                 sense_buf[9] = 0x0A;
6064                 sense_buf[10] = 0x80;
6065
6066                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6067
6068                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6069                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6070                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6071                 sense_buf[15] = failing_lba & 0x000000ff;
6072
6073                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6074
6075                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6076                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6077                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6078                 sense_buf[19] = failing_lba & 0x000000ff;
6079         } else {
6080                 sense_buf[0] = 0x70;
6081                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6082                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6083                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6084
6085                 /* Illegal request */
6086                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6087                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6088                         sense_buf[7] = 10;      /* additional length */
6089
6090                         /* IOARCB was in error */
6091                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6092                                 sense_buf[15] = 0xC0;
6093                         else    /* Parameter data was invalid */
6094                                 sense_buf[15] = 0x80;
6095
6096                         sense_buf[16] =
6097                             ((IPR_FIELD_POINTER_MASK &
6098                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6099                         sense_buf[17] =
6100                             (IPR_FIELD_POINTER_MASK &
6101                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6102                 } else {
6103                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6104                                 if (ipr_is_vset_device(res))
6105                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6106                                 else
6107                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6108
6109                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6110                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6111                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6112                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6113                                 sense_buf[6] = failing_lba & 0x000000ff;
6114                         }
6115
6116                         sense_buf[7] = 6;       /* additional length */
6117                 }
6118         }
6119 }
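
/*
 * Worked example (illustrative values): a DASD medium error
 * (IPR_IOASC_MED_DO_NOT_REALLOC) at LBA 0x12345678 takes the
 * fixed-format branch above and produces
 *
 *      sense_buf[0]    = 0x70 | 0x80;          fixed format + Valid bit
 *      sense_buf[2]    = 0x03;                 MEDIUM ERROR sense key
 *      sense_buf[3..6] = 12 34 56 78;          information field (LBA)
 *      sense_buf[7]    = 6;                    additional length
 *
 * with sense_buf[12]/[13] carrying the ASC/ASCQ decoded from the
 * IOASC.  The descriptor format (0x72) is only generated for vset
 * devices whose failing LBA needs more than 32 bits, since
 * fixed-format sense cannot carry a 64-bit information field.
 */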
6120
6121 /**
6122  * ipr_get_autosense - Copy autosense data to sense buffer
6123  * @ipr_cmd:    ipr command struct
6124  *
6125  * This function copies the autosense buffer to the buffer
6126  * in the scsi_cmd, if there is autosense available.
6127  *
6128  * Return value:
6129  *      1 if autosense was available / 0 if not
6130  **/
6131 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6132 {
6133         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6134         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6135
6136         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6137                 return 0;
6138
6139         if (ipr_cmd->ioa_cfg->sis64)
6140                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6141                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6142                            SCSI_SENSE_BUFFERSIZE));
6143         else
6144                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6145                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6146                            SCSI_SENSE_BUFFERSIZE));
6147         return 1;
6148 }
6149
6150 /**
6151  * ipr_erp_start - Process an error response for a SCSI op
6152  * @ioa_cfg:    ioa config struct
6153  * @ipr_cmd:    ipr command struct
6154  *
6155  * This function determines whether or not to initiate ERP
6156  * on the affected device.
6157  *
6158  * Return value:
6159  *      nothing
6160  **/
6161 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6162                               struct ipr_cmnd *ipr_cmd)
6163 {
6164         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6165         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6166         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6167         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6168
6169         if (!res) {
6170                 ipr_scsi_eh_done(ipr_cmd);
6171                 return;
6172         }
6173
6174         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6175                 ipr_gen_sense(ipr_cmd);
6176
6177         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6178
6179         switch (masked_ioasc) {
6180         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6181                 if (ipr_is_naca_model(res))
6182                         scsi_cmd->result |= (DID_ABORT << 16);
6183                 else
6184                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6185                 break;
6186         case IPR_IOASC_IR_RESOURCE_HANDLE:
6187         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6188                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6189                 break;
6190         case IPR_IOASC_HW_SEL_TIMEOUT:
6191                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6192                 if (!ipr_is_naca_model(res))
6193                         res->needs_sync_complete = 1;
6194                 break;
6195         case IPR_IOASC_SYNC_REQUIRED:
6196                 if (!res->in_erp)
6197                         res->needs_sync_complete = 1;
6198                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6199                 break;
6200         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6201         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6202                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6203                 break;
6204         case IPR_IOASC_BUS_WAS_RESET:
6205         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6206                 /*
6207                  * Report the bus reset and ask for a retry. The device
6208                  * will return a check condition/unit attention on its next command.
6209                  */
6210                 if (!res->resetting_device)
6211                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6212                 scsi_cmd->result |= (DID_ERROR << 16);
6213                 if (!ipr_is_naca_model(res))
6214                         res->needs_sync_complete = 1;
6215                 break;
6216         case IPR_IOASC_HW_DEV_BUS_STATUS:
6217                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6218                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6219                         if (!ipr_get_autosense(ipr_cmd)) {
6220                                 if (!ipr_is_naca_model(res)) {
6221                                         ipr_erp_cancel_all(ipr_cmd);
6222                                         return;
6223                                 }
6224                         }
6225                 }
6226                 if (!ipr_is_naca_model(res))
6227                         res->needs_sync_complete = 1;
6228                 break;
6229         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6230                 break;
6231         case IPR_IOASC_IR_NON_OPTIMIZED:
6232                 if (res->raw_mode) {
6233                         res->raw_mode = 0;
6234                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6235                 } else
6236                         scsi_cmd->result |= (DID_ERROR << 16);
6237                 break;
6238         default:
6239                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6240                         scsi_cmd->result |= (DID_ERROR << 16);
6241                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6242                         res->needs_sync_complete = 1;
6243                 break;
6244         }
6245
6246         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6247         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6248         scsi_cmd->scsi_done(scsi_cmd);
6249 }
6250
6251 /**
6252  * ipr_scsi_done - mid-layer done function
6253  * @ipr_cmd:    ipr command struct
6254  *
6255  * This function is invoked by the interrupt handler for
6256  * ops generated by the SCSI mid-layer
6257  *
6258  * Return value:
6259  *      none
6260  **/
6261 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6262 {
6263         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6264         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6265         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6266         unsigned long lock_flags;
6267
6268         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6269
6270         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6271                 scsi_dma_unmap(scsi_cmd);
6272
6273                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6274                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6275                 scsi_cmd->scsi_done(scsi_cmd);
6276                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6277         } else {
6278                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6279                 spin_lock(&ipr_cmd->hrrq->_lock);
6280                 ipr_erp_start(ioa_cfg, ipr_cmd);
6281                 spin_unlock(&ipr_cmd->hrrq->_lock);
6282                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6283         }
6284 }
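
/*
 * Locking note: the fast path above takes only the per-queue
 * hrrq->lock, but error completions must start ERP under the SCSI
 * host lock, giving the nesting
 *
 *      spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);   outer
 *      spin_lock(&ipr_cmd->hrrq->_lock);                          inner
 *
 * The driver never takes these two in the opposite order, so paths
 * that already hold host_lock (resets, EH) can safely grab the queue
 * lock.
 */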
6285
6286 /**
6287  * ipr_queuecommand - Queue a mid-layer request
6288  * @shost:              scsi host struct
6289  * @scsi_cmd:   scsi command struct
6290  *
6291  * This function queues a request generated by the mid-layer.
6292  *
6293  * Return value:
6294  *      0 on success
6296  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6297  **/
6298 static int ipr_queuecommand(struct Scsi_Host *shost,
6299                             struct scsi_cmnd *scsi_cmd)
6300 {
6301         struct ipr_ioa_cfg *ioa_cfg;
6302         struct ipr_resource_entry *res;
6303         struct ipr_ioarcb *ioarcb;
6304         struct ipr_cmnd *ipr_cmd;
6305         unsigned long hrrq_flags, lock_flags;
6306         int rc;
6307         struct ipr_hrr_queue *hrrq;
6308         int hrrq_id;
6309
6310         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6311
6312         scsi_cmd->result = (DID_OK << 16);
6313         res = scsi_cmd->device->hostdata;
6314
6315         if (ipr_is_gata(res) && res->sata_port) {
6316                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6317                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6318                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6319                 return rc;
6320         }
6321
6322         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6323         hrrq = &ioa_cfg->hrrq[hrrq_id];
6324
6325         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6326         /*
6327          * We are currently blocking all devices due to a host reset.
6328          * We have told the host to stop giving us new requests, but
6329          * ERP ops don't count. FIXME
6330          */
6331         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6332                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6333                 return SCSI_MLQUEUE_HOST_BUSY;
6334         }
6335
6336         /*
6337          * FIXME - Create scsi_set_host_offline interface
6338          *  and the ioa_is_dead check can be removed
6339          */
6340         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6341                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6342                 goto err_nodev;
6343         }
6344
6345         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6346         if (ipr_cmd == NULL) {
6347                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6348                 return SCSI_MLQUEUE_HOST_BUSY;
6349         }
6350         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6351
6352         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6353         ioarcb = &ipr_cmd->ioarcb;
6354
6355         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6356         ipr_cmd->scsi_cmd = scsi_cmd;
6357         ipr_cmd->done = ipr_scsi_eh_done;
6358
6359         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6360                 if (scsi_cmd->underflow == 0)
6361                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6362
6363                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6364                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6365                         res->reset_occurred = 0;
6366                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6367                 }
6368                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6369                 if (scsi_cmd->flags & SCMD_TAGGED)
6370                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6371                 else
6372                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6373         }
6374
6375         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6376             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6377                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6378         }
6379         if (res->raw_mode && ipr_is_af_dasd_device(res))
6380                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6381
6382         if (ioa_cfg->sis64)
6383                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6384         else
6385                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6386
6387         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6388         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6389                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6390                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6391                 if (!rc)
6392                         scsi_dma_unmap(scsi_cmd);
6393                 return SCSI_MLQUEUE_HOST_BUSY;
6394         }
6395
6396         if (unlikely(hrrq->ioa_is_dead)) {
6397                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6398                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6399                 scsi_dma_unmap(scsi_cmd);
6400                 goto err_nodev;
6401         }
6402
6403         ioarcb->res_handle = res->res_handle;
6404         if (res->needs_sync_complete) {
6405                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6406                 res->needs_sync_complete = 0;
6407         }
6408         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6409         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6410         ipr_send_command(ipr_cmd);
6411         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6412         return 0;
6413
6414 err_nodev:
6415         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6416         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6417         scsi_cmd->result = (DID_NO_CONNECT << 16);
6418         scsi_cmd->scsi_done(scsi_cmd);
6419         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6420         return 0;
6421 }
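
/*
 * Note on the queuecommand locking above: hrrq->lock is dropped while
 * the scatter/gather list is built (scsi_dma_map() can be expensive),
 * so allow_cmds and ioa_is_dead are re-checked once the lock is
 * retaken.  A reset that slipped in between simply gets the command
 * returned to the free queue, with the midlayer either retrying
 * (SCSI_MLQUEUE_HOST_BUSY) or failing it (DID_NO_CONNECT).
 */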
6422
6423 /**
6424  * ipr_ioctl - IOCTL handler
6425  * @sdev:       scsi device struct
6426  * @cmd:        IOCTL cmd
6427  * @arg:        IOCTL arg
6428  *
6429  * Return value:
6430  *      0 on success / other on failure
6431  **/
6432 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6433 {
6434         struct ipr_resource_entry *res;
6435
6436         res = (struct ipr_resource_entry *)sdev->hostdata;
6437         if (res && ipr_is_gata(res)) {
6438                 if (cmd == HDIO_GET_IDENTITY)
6439                         return -ENOTTY;
6440                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6441         }
6442
6443         return -EINVAL;
6444 }
6445
6446 /**
6447  * ipr_ioa_info - Get information about the card/driver
6448  * @host:       scsi host struct
6449  *
6450  * Return value:
6451  *      pointer to buffer with description string
6452  **/
6453 static const char *ipr_ioa_info(struct Scsi_Host *host)
6454 {
6455         static char buffer[512];
6456         struct ipr_ioa_cfg *ioa_cfg;
6457         unsigned long lock_flags = 0;
6458
6459         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6460
6461         spin_lock_irqsave(host->host_lock, lock_flags);
6462         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6463         spin_unlock_irqrestore(host->host_lock, lock_flags);
6464
6465         return buffer;
6466 }
6467
6468 static struct scsi_host_template driver_template = {
6469         .module = THIS_MODULE,
6470         .name = "IPR",
6471         .info = ipr_ioa_info,
6472         .ioctl = ipr_ioctl,
6473         .queuecommand = ipr_queuecommand,
6474         .eh_abort_handler = ipr_eh_abort,
6475         .eh_device_reset_handler = ipr_eh_dev_reset,
6476         .eh_host_reset_handler = ipr_eh_host_reset,
6477         .slave_alloc = ipr_slave_alloc,
6478         .slave_configure = ipr_slave_configure,
6479         .slave_destroy = ipr_slave_destroy,
6480         .scan_finished = ipr_scan_finished,
6481         .target_alloc = ipr_target_alloc,
6482         .target_destroy = ipr_target_destroy,
6483         .change_queue_depth = ipr_change_queue_depth,
6484         .bios_param = ipr_biosparam,
6485         .can_queue = IPR_MAX_COMMANDS,
6486         .this_id = -1,
6487         .sg_tablesize = IPR_MAX_SGLIST,
6488         .max_sectors = IPR_IOA_MAX_SECTORS,
6489         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6490         .use_clustering = ENABLE_CLUSTERING,
6491         .shost_attrs = ipr_ioa_attrs,
6492         .sdev_attrs = ipr_dev_attrs,
6493         .proc_name = IPR_NAME,
6494         .use_blk_tags = 1,
6495 };
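
/*
 * driver_template is bound to a Scsi_Host at probe time (a minimal
 * sketch, assuming the usual scsi_host_alloc()/scsi_add_host()
 * pairing during PCI probe; pdev here stands for the probing PCI
 * device):
 *
 *      struct Scsi_Host *host;
 *
 *      host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
 *      if (!host)
 *              return -ENOMEM;
 *      ...
 *      rc = scsi_add_host(host, &pdev->dev);
 */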
6496
6497 /**
6498  * ipr_ata_phy_reset - libata phy_reset handler
6499  * @ap:         ata port to reset
6500  *
6501  **/
6502 static void ipr_ata_phy_reset(struct ata_port *ap)
6503 {
6504         unsigned long flags;
6505         struct ipr_sata_port *sata_port = ap->private_data;
6506         struct ipr_resource_entry *res = sata_port->res;
6507         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6508         int rc;
6509
6510         ENTER;
6511         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6512         while (ioa_cfg->in_reset_reload) {
6513                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6514                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6515                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6516         }
6517
6518         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6519                 goto out_unlock;
6520
6521         rc = ipr_device_reset(ioa_cfg, res);
6522
6523         if (rc) {
6524                 ap->link.device[0].class = ATA_DEV_NONE;
6525                 goto out_unlock;
6526         }
6527
6528         ap->link.device[0].class = res->ata_class;
6529         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6530                 ap->link.device[0].class = ATA_DEV_NONE;
6531
6532 out_unlock:
6533         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6534         LEAVE;
6535 }
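
/*
 * The drop-lock-and-wait loop above is the driver's standard way to
 * serialize with an in-flight adapter reset: host_lock cannot be held
 * across wait_event(), so it is released, the thread sleeps on
 * reset_wait_q until in_reset_reload clears, and the lock is retaken
 * before re-checking state.  ipr_ata_post_internal() below repeats
 * the same pattern.
 */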
6536
6537 /**
6538  * ipr_ata_post_internal - Cleanup after an internal command
6539  * @qc: ATA queued command
6540  *
6541  * Return value:
6542  *      none
6543  **/
6544 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6545 {
6546         struct ipr_sata_port *sata_port = qc->ap->private_data;
6547         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6548         struct ipr_cmnd *ipr_cmd;
6549         struct ipr_hrr_queue *hrrq;
6550         unsigned long flags;
6551
6552         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6553         while (ioa_cfg->in_reset_reload) {
6554                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6555                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6556                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6557         }
6558
6559         for_each_hrrq(hrrq, ioa_cfg) {
6560                 spin_lock(&hrrq->_lock);
6561                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6562                         if (ipr_cmd->qc == qc) {
6563                                 ipr_device_reset(ioa_cfg, sata_port->res);
6564                                 break;
6565                         }
6566                 }
6567                 spin_unlock(&hrrq->_lock);
6568         }
6569         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6570 }
6571
6572 /**
6573  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6574  * @regs:       destination
6575  * @tf: source ATA taskfile
6576  *
6577  * Return value:
6578  *      none
6579  **/
6580 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6581                              struct ata_taskfile *tf)
6582 {
6583         regs->feature = tf->feature;
6584         regs->nsect = tf->nsect;
6585         regs->lbal = tf->lbal;
6586         regs->lbam = tf->lbam;
6587         regs->lbah = tf->lbah;
6588         regs->device = tf->device;
6589         regs->command = tf->command;
6590         regs->hob_feature = tf->hob_feature;
6591         regs->hob_nsect = tf->hob_nsect;
6592         regs->hob_lbal = tf->hob_lbal;
6593         regs->hob_lbam = tf->hob_lbam;
6594         regs->hob_lbah = tf->hob_lbah;
6595         regs->ctl = tf->ctl;
6596 }
6597
6598 /**
6599  * ipr_sata_done - done function for SATA commands
6600  * @ipr_cmd:    ipr command struct
6601  *
6602  * This function is invoked by the interrupt handler for
6603  * ops generated by the SCSI mid-layer to SATA devices
6604  *
6605  * Return value:
6606  *      none
6607  **/
6608 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6609 {
6610         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6611         struct ata_queued_cmd *qc = ipr_cmd->qc;
6612         struct ipr_sata_port *sata_port = qc->ap->private_data;
6613         struct ipr_resource_entry *res = sata_port->res;
6614         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6615
6616         spin_lock(&ipr_cmd->hrrq->_lock);
6617         if (ipr_cmd->ioa_cfg->sis64)
6618                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6619                        sizeof(struct ipr_ioasa_gata));
6620         else
6621                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6622                        sizeof(struct ipr_ioasa_gata));
6623         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6624
6625         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6626                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6627
6628         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6629                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6630         else
6631                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6632         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6633         spin_unlock(&ipr_cmd->hrrq->_lock);
6634         ata_qc_complete(qc);
6635 }
6636
6637 /**
6638  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6639  * @ipr_cmd:    ipr command struct
6640  * @qc:         ATA queued command
6641  *
6642  **/
6643 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6644                                   struct ata_queued_cmd *qc)
6645 {
6646         u32 ioadl_flags = 0;
6647         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6648         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6649         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6650         int len = qc->nbytes;
6651         struct scatterlist *sg;
6652         unsigned int si;
6653         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6654
6655         if (len == 0)
6656                 return;
6657
6658         if (qc->dma_dir == DMA_TO_DEVICE) {
6659                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6660                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6661         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6662                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6663
6664         ioarcb->data_transfer_length = cpu_to_be32(len);
6665         ioarcb->ioadl_len =
6666                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6667         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6668                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6669
6670         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6671                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6672                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6673                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6674
6675                 last_ioadl64 = ioadl64;
6676                 ioadl64++;
6677         }
6678
6679         if (likely(last_ioadl64))
6680                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6681 }
6682
6683 /**
6684  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6685  * @ipr_cmd:    ipr command struct
6686  * @qc:         ATA queued command
6687  *
6688  **/
6689 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6690                                 struct ata_queued_cmd *qc)
6691 {
6692         u32 ioadl_flags = 0;
6693         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6694         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6695         struct ipr_ioadl_desc *last_ioadl = NULL;
6696         int len = qc->nbytes;
6697         struct scatterlist *sg;
6698         unsigned int si;
6699
6700         if (len == 0)
6701                 return;
6702
6703         if (qc->dma_dir == DMA_TO_DEVICE) {
6704                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6705                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6706                 ioarcb->data_transfer_length = cpu_to_be32(len);
6707                 ioarcb->ioadl_len =
6708                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6709         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6710                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6711                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6712                 ioarcb->read_ioadl_len =
6713                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6714         }
6715
6716         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6717                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6718                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6719
6720                 last_ioadl = ioadl;
6721                 ioadl++;
6722         }
6723
6724         if (likely(last_ioadl))
6725                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6726 }
6727
6728 /**
6729  * ipr_qc_defer - Get a free ipr_cmd
6730  * @qc: queued command
6731  *
6732  * Return value:
6733  *      0 if a command block was reserved / ATA_DEFER_LINK if not
6734  **/
6735 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6736 {
6737         struct ata_port *ap = qc->ap;
6738         struct ipr_sata_port *sata_port = ap->private_data;
6739         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6740         struct ipr_cmnd *ipr_cmd;
6741         struct ipr_hrr_queue *hrrq;
6742         int hrrq_id;
6743
6744         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6745         hrrq = &ioa_cfg->hrrq[hrrq_id];
6746
6747         qc->lldd_task = NULL;
6748         spin_lock(&hrrq->_lock);
6749         if (unlikely(hrrq->ioa_is_dead)) {
6750                 spin_unlock(&hrrq->_lock);
6751                 return 0;
6752         }
6753
6754         if (unlikely(!hrrq->allow_cmds)) {
6755                 spin_unlock(&hrrq->_lock);
6756                 return ATA_DEFER_LINK;
6757         }
6758
6759         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6760         if (ipr_cmd == NULL) {
6761                 spin_unlock(&hrrq->_lock);
6762                 return ATA_DEFER_LINK;
6763         }
6764
6765         qc->lldd_task = ipr_cmd;
6766         spin_unlock(&hrrq->_lock);
6767         return 0;
6768 }
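
/*
 * The command block reserved here reaches ipr_qc_issue() through
 * qc->lldd_task.  Returning 0 while leaving lldd_task NULL (dead IOA)
 * is deliberate: it lets the issue path fail the qc with
 * AC_ERR_SYSTEM instead of deferring it forever against an adapter
 * that will never come back.
 */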
6769
6770 /**
6771  * ipr_qc_issue - Issue a SATA qc to a device
6772  * @qc: queued command
6773  *
6774  * Return value:
6775  *      0 on success / AC_ERR_* flags on failure
6776  **/
6777 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6778 {
6779         struct ata_port *ap = qc->ap;
6780         struct ipr_sata_port *sata_port = ap->private_data;
6781         struct ipr_resource_entry *res = sata_port->res;
6782         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6783         struct ipr_cmnd *ipr_cmd;
6784         struct ipr_ioarcb *ioarcb;
6785         struct ipr_ioarcb_ata_regs *regs;
6786
6787         if (qc->lldd_task == NULL)
6788                 ipr_qc_defer(qc);
6789
6790         ipr_cmd = qc->lldd_task;
6791         if (ipr_cmd == NULL)
6792                 return AC_ERR_SYSTEM;
6793
6794         qc->lldd_task = NULL;
6795         spin_lock(&ipr_cmd->hrrq->_lock);
6796         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6797                         ipr_cmd->hrrq->ioa_is_dead)) {
6798                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6799                 spin_unlock(&ipr_cmd->hrrq->_lock);
6800                 return AC_ERR_SYSTEM;
6801         }
6802
6803         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6804         ioarcb = &ipr_cmd->ioarcb;
6805
6806         if (ioa_cfg->sis64) {
6807                 regs = &ipr_cmd->i.ata_ioadl.regs;
6808                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6809         } else
6810                 regs = &ioarcb->u.add_data.u.regs;
6811
6812         memset(regs, 0, sizeof(*regs));
6813         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6814
6815         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6816         ipr_cmd->qc = qc;
6817         ipr_cmd->done = ipr_sata_done;
6818         ipr_cmd->ioarcb.res_handle = res->res_handle;
6819         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6820         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6821         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6822         ipr_cmd->dma_use_sg = qc->n_elem;
6823
6824         if (ioa_cfg->sis64)
6825                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6826         else
6827                 ipr_build_ata_ioadl(ipr_cmd, qc);
6828
6829         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6830         ipr_copy_sata_tf(regs, &qc->tf);
6831         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6832         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6833
6834         switch (qc->tf.protocol) {
6835         case ATA_PROT_NODATA:
6836         case ATA_PROT_PIO:
6837                 break;
6838
6839         case ATA_PROT_DMA:
6840                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6841                 break;
6842
6843         case ATAPI_PROT_PIO:
6844         case ATAPI_PROT_NODATA:
6845                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6846                 break;
6847
6848         case ATAPI_PROT_DMA:
6849                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6850                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6851                 break;
6852
6853         default:
6854                 WARN_ON(1);
6855                 spin_unlock(&ipr_cmd->hrrq->_lock);
6856                 return AC_ERR_INVALID;
6857         }
6858
6859         ipr_send_command(ipr_cmd);
6860         spin_unlock(&ipr_cmd->hrrq->_lock);
6861
6862         return 0;
6863 }
6864
6865 /**
6866  * ipr_qc_fill_rtf - Read result TF
6867  * @qc: ATA queued command
6868  *
6869  * Return value:
6870  *      true
6871  **/
6872 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6873 {
6874         struct ipr_sata_port *sata_port = qc->ap->private_data;
6875         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6876         struct ata_taskfile *tf = &qc->result_tf;
6877
6878         tf->feature = g->error;
6879         tf->nsect = g->nsect;
6880         tf->lbal = g->lbal;
6881         tf->lbam = g->lbam;
6882         tf->lbah = g->lbah;
6883         tf->device = g->device;
6884         tf->command = g->status;
6885         tf->hob_nsect = g->hob_nsect;
6886         tf->hob_lbal = g->hob_lbal;
6887         tf->hob_lbam = g->hob_lbam;
6888         tf->hob_lbah = g->hob_lbah;
6889
6890         return true;
6891 }
6892
6893 static struct ata_port_operations ipr_sata_ops = {
6894         .phy_reset = ipr_ata_phy_reset,
6895         .hardreset = ipr_sata_reset,
6896         .post_internal_cmd = ipr_ata_post_internal,
6897         .qc_prep = ata_noop_qc_prep,
6898         .qc_defer = ipr_qc_defer,
6899         .qc_issue = ipr_qc_issue,
6900         .qc_fill_rtf = ipr_qc_fill_rtf,
6901         .port_start = ata_sas_port_start,
6902         .port_stop = ata_sas_port_stop
6903 };
6904
6905 static struct ata_port_info sata_port_info = {
6906         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6907                           ATA_FLAG_SAS_HOST,
6908         .pio_mask       = ATA_PIO4_ONLY,
6909         .mwdma_mask     = ATA_MWDMA2,
6910         .udma_mask      = ATA_UDMA6,
6911         .port_ops       = &ipr_sata_ops
6912 };
6913
6914 #ifdef CONFIG_PPC_PSERIES
6915 static const u16 ipr_blocked_processors[] = {
6916         PVR_NORTHSTAR,
6917         PVR_PULSAR,
6918         PVR_POWER4,
6919         PVR_ICESTAR,
6920         PVR_SSTAR,
6921         PVR_POWER4p,
6922         PVR_630,
6923         PVR_630p
6924 };
6925
6926 /**
6927  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6928  * @ioa_cfg:    ioa cfg struct
6929  *
6930  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6931  * certain pSeries hardware. This function determines if the given
6932  * adapter is in one of these configurations or not.
6933  *
6934  * Return value:
6935  *      1 if adapter is not supported / 0 if adapter is supported
6936  **/
6937 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6938 {
6939         int i;
6940
6941         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6942                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6943                         if (pvr_version_is(ipr_blocked_processors[i]))
6944                                 return 1;
6945                 }
6946         }
6947         return 0;
6948 }
6949 #else
6950 #define ipr_invalid_adapter(ioa_cfg) 0
6951 #endif
6952
6953 /**
6954  * ipr_ioa_bringdown_done - IOA bring down completion.
6955  * @ipr_cmd:    ipr command struct
6956  *
6957  * This function processes the completion of an adapter bring down.
6958  * It wakes any reset sleepers.
6959  *
6960  * Return value:
6961  *      IPR_RC_JOB_RETURN
6962  **/
6963 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6964 {
6965         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6966         int i;
6967
6968         ENTER;
6969         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6970                 ipr_trace;
6971                 spin_unlock_irq(ioa_cfg->host->host_lock);
6972                 scsi_unblock_requests(ioa_cfg->host);
6973                 spin_lock_irq(ioa_cfg->host->host_lock);
6974         }
6975
6976         ioa_cfg->in_reset_reload = 0;
6977         ioa_cfg->reset_retries = 0;
6978         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6979                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6980                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6981                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6982         }
6983         wmb();
6984
6985         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6986         wake_up_all(&ioa_cfg->reset_wait_q);
6987         LEAVE;
6988
6989         return IPR_RC_JOB_RETURN;
6990 }
6991
6992 /**
6993  * ipr_ioa_reset_done - IOA reset completion.
6994  * @ipr_cmd:    ipr command struct
6995  *
6996  * This function processes the completion of an adapter reset.
6997  * It schedules any necessary mid-layer add/removes and
6998  * wakes any reset sleepers.
6999  *
7000  * Return value:
7001  *      IPR_RC_JOB_RETURN
7002  **/
7003 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7004 {
7005         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7006         struct ipr_resource_entry *res;
7007         struct ipr_hostrcb *hostrcb, *temp;
7008         int i = 0, j;
7009
7010         ENTER;
7011         ioa_cfg->in_reset_reload = 0;
7012         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7013                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7014                 ioa_cfg->hrrq[j].allow_cmds = 1;
7015                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7016         }
7017         wmb();
7018         ioa_cfg->reset_cmd = NULL;
7019         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7020
7021         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7022                 if (res->add_to_ml || res->del_from_ml) {
7023                         ipr_trace;
7024                         break;
7025                 }
7026         }
7027         schedule_work(&ioa_cfg->work_q);
7028
7029         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
7030                 list_del(&hostrcb->queue);
7031                 if (i++ < IPR_NUM_LOG_HCAMS)
7032                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
7033                 else
7034                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
7035         }
7036
7037         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7038         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7039
7040         ioa_cfg->reset_retries = 0;
7041         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7042         wake_up_all(&ioa_cfg->reset_wait_q);
7043
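        /*
         * scsi_unblock_requests() may re-run the request queue, which
         * can recurse into queuecommand and take the host lock, so
         * release it across the call (as ipr_ioa_bringdown_done does).
         */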
7044         spin_unlock(ioa_cfg->host->host_lock);
7045         scsi_unblock_requests(ioa_cfg->host);
7046         spin_lock(ioa_cfg->host->host_lock);
7047
7048         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7049                 scsi_block_requests(ioa_cfg->host);
7050
7051         schedule_work(&ioa_cfg->work_q);
7052         LEAVE;
7053         return IPR_RC_JOB_RETURN;
7054 }
7055
7056 /**
7057  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7058  * @supported_dev:      supported device struct
7059  * @vpids:              vendor product id struct
7060  *
7061  * Return value:
7062  *      none
7063  **/
7064 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7065                                  struct ipr_std_inq_vpids *vpids)
7066 {
7067         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7068         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7069         supported_dev->num_records = 1;
7070         supported_dev->data_length =
7071                 cpu_to_be16(sizeof(struct ipr_supported_device));
7072         supported_dev->reserved = 0;
7073 }
7074
7075 /**
7076  * ipr_set_supported_devs - Send Set Supported Devices for a device
7077  * @ipr_cmd:    ipr command struct
7078  *
7079  * This function sends a Set Supported Devices to the adapter
7080  *
7081  * Return value:
7082  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7083  **/
7084 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7085 {
7086         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7087         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7088         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7089         struct ipr_resource_entry *res = ipr_cmd->u.res;
7090
7091         ipr_cmd->job_step = ipr_ioa_reset_done;
7092
7093         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7094                 if (!ipr_is_scsi_disk(res))
7095                         continue;
7096
7097                 ipr_cmd->u.res = res;
7098                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7099
7100                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7101                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7102                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7103
7104                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7105                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7106                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7107                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7108
7109                 ipr_init_ioadl(ipr_cmd,
7110                                ioa_cfg->vpd_cbs_dma +
7111                                  offsetof(struct ipr_misc_cbs, supp_dev),
7112                                sizeof(struct ipr_supported_device),
7113                                IPR_IOADL_FLAGS_WRITE_LAST);
7114
7115                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7116                            IPR_SET_SUP_DEVICE_TIMEOUT);
7117
7118                 if (!ioa_cfg->sis64)
7119                         ipr_cmd->job_step = ipr_set_supported_devs;
7120                 LEAVE;
7121                 return IPR_RC_JOB_RETURN;
7122         }
7123
7124         LEAVE;
7125         return IPR_RC_JOB_CONTINUE;
7126 }
7127
7128 /**
7129  * ipr_get_mode_page - Locate specified mode page
7130  * @mode_pages: mode page buffer
7131  * @page_code:  page code to find
7132  * @len:                minimum required length for mode page
7133  *
7134  * Return value:
7135  *      pointer to mode page / NULL on failure
7136  **/
7137 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7138                                u32 page_code, u32 len)
7139 {
7140         struct ipr_mode_page_hdr *mode_hdr;
7141         u32 page_length;
7142         u32 length;
7143
7144         if (!mode_pages || (mode_pages->hdr.length == 0))
7145                 return NULL;
7146
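        /*
         * hdr.length excludes the length byte itself (hence the +1);
         * the 4-byte mode parameter header and any block descriptors
         * precede the first page, so subtract both to get the number
         * of mode page bytes left to walk.
         */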
7147         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7148         mode_hdr = (struct ipr_mode_page_hdr *)
7149                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7150
7151         while (length) {
7152                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7153                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7154                                 return mode_hdr;
7155                         break;
7156                 } else {
7157                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7158                                        mode_hdr->page_length);
7159                         length -= page_length;
7160                         mode_hdr = (struct ipr_mode_page_hdr *)
7161                                 ((unsigned long)mode_hdr + page_length);
7162                 }
7163         }
7164         return NULL;
7165 }
7166
7167 /**
7168  * ipr_check_term_power - Check for term power errors
7169  * @ioa_cfg:    ioa config struct
7170  * @mode_pages: IOAFP mode pages buffer
7171  *
7172  * Check the IOAFP's mode page 28 for term power errors
7173  *
7174  * Return value:
7175  *      nothing
7176  **/
7177 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7178                                  struct ipr_mode_pages *mode_pages)
7179 {
7180         int i;
7181         int entry_length;
7182         struct ipr_dev_bus_entry *bus;
7183         struct ipr_mode_page28 *mode_page;
7184
7185         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7186                                       sizeof(struct ipr_mode_page28));
7187
        if (!mode_page)         /* page 28 not present */
                return;

7188         entry_length = mode_page->entry_length;
7189
7190         bus = mode_page->bus;
7191
7192         for (i = 0; i < mode_page->num_entries; i++) {
7193                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7194                         dev_err(&ioa_cfg->pdev->dev,
7195                                 "Term power is absent on scsi bus %d\n",
7196                                 bus->res_addr.bus);
7197                 }
7198
7199                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7200         }
7201 }
7202
7203 /**
7204  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7205  * @ioa_cfg:    ioa config struct
7206  *
7207  * Looks through the config table checking for SES devices. If
7208  * the SES device is in the SES table indicating a maximum SCSI
7209  * bus speed, the speed is limited for the bus.
7210  *
7211  * Return value:
7212  *      none
7213  **/
7214 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7215 {
7216         u32 max_xfer_rate;
7217         int i;
7218
7219         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7220                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7221                                                        ioa_cfg->bus_attr[i].bus_width);
7222
7223                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7224                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7225         }
7226 }
7227
7228 /**
7229  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7230  * @ioa_cfg:    ioa config struct
7231  * @mode_pages: mode page 28 buffer
7232  *
7233  * Updates mode page 28 based on driver configuration
7234  *
7235  * Return value:
7236  *      none
7237  **/
7238 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7239                                           struct ipr_mode_pages *mode_pages)
7240 {
7241         int i, entry_length;
7242         struct ipr_dev_bus_entry *bus;
7243         struct ipr_bus_attributes *bus_attr;
7244         struct ipr_mode_page28 *mode_page;
7245
7246         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7247                                       sizeof(struct ipr_mode_page28));
7248
        if (!mode_page)         /* page 28 not present */
                return;

7249         entry_length = mode_page->entry_length;
7250
7251         /* Loop for each device bus entry */
7252         for (i = 0, bus = mode_page->bus;
7253              i < mode_page->num_entries;
7254              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7255                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7256                         dev_err(&ioa_cfg->pdev->dev,
7257                                 "Invalid resource address reported: 0x%08X\n",
7258                                 IPR_GET_PHYS_LOC(bus->res_addr));
7259                         continue;
7260                 }
7261
7262                 bus_attr = &ioa_cfg->bus_attr[i];
7263                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7264                 bus->bus_width = bus_attr->bus_width;
7265                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7266                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7267                 if (bus_attr->qas_enabled)
7268                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7269                 else
7270                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7271         }
7272 }
7273
7274 /**
7275  * ipr_build_mode_select - Build a mode select command
7276  * @ipr_cmd:    ipr command struct
7277  * @res_handle: resource handle to send command to
7278  * @parm:               Byte 1 of Mode Select command
7279  * @dma_addr:   DMA buffer address
7280  * @xfer_len:   data transfer length
7281  *
7282  * Return value:
7283  *      none
7284  **/
7285 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7286                                   __be32 res_handle, u8 parm,
7287                                   dma_addr_t dma_addr, u8 xfer_len)
7288 {
7289         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7290
7291         ioarcb->res_handle = res_handle;
7292         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7293         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7294         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7295         ioarcb->cmd_pkt.cdb[1] = parm;
7296         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7297
7298         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7299 }
7300
7301 /**
7302  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7303  * @ipr_cmd:    ipr command struct
7304  *
7305  * This function sets up the SCSI bus attributes and sends
7306  * a Mode Select for Page 28 to activate them.
7307  *
7308  * Return value:
7309  *      IPR_RC_JOB_RETURN
7310  **/
7311 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7312 {
7313         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7314         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7315         int length;
7316
7317         ENTER;
7318         ipr_scsi_bus_speed_limit(ioa_cfg);
7319         ipr_check_term_power(ioa_cfg, mode_pages);
7320         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
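        /*
         * The transfer length includes the length byte itself, while
         * the mode data length field is reserved for MODE SELECT and
         * must be zeroed before the pages are written back.
         */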
7321         length = mode_pages->hdr.length + 1;
7322         mode_pages->hdr.length = 0;
7323
7324         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7325                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7326                               length);
7327
7328         ipr_cmd->job_step = ipr_set_supported_devs;
7329         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7330                                     struct ipr_resource_entry, queue);
7331         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7332
7333         LEAVE;
7334         return IPR_RC_JOB_RETURN;
7335 }
7336
7337 /**
7338  * ipr_build_mode_sense - Builds a mode sense command
7339  * @ipr_cmd:    ipr command struct
7340  * @res_handle:  resource handle to send command to
7341  * @parm:               Byte 2 of mode sense command
7342  * @dma_addr:   DMA address of mode sense buffer
7343  * @xfer_len:   Size of DMA buffer
7344  *
7345  * Return value:
7346  *      none
7347  **/
7348 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7349                                  __be32 res_handle,
7350                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7351 {
7352         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7353
7354         ioarcb->res_handle = res_handle;
7355         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7356         ioarcb->cmd_pkt.cdb[2] = parm;
7357         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7358         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7359
7360         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7361 }
7362
7363 /**
7364  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7365  * @ipr_cmd:    ipr command struct
7366  *
7367  * This function handles the failure of an IOA bringup command.
7368  *
7369  * Return value:
7370  *      IPR_RC_JOB_RETURN
7371  **/
7372 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7373 {
7374         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7375         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7376
7377         dev_err(&ioa_cfg->pdev->dev,
7378                 "0x%02X failed with IOASC: 0x%08X\n",
7379                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7380
7381         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7382         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7383         return IPR_RC_JOB_RETURN;
7384 }
7385
7386 /**
7387  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7388  * @ipr_cmd:    ipr command struct
7389  *
7390  * This function handles the failure of a Mode Sense to the IOAFP.
7391  * Some adapters do not handle all mode pages.
7392  *
7393  * Return value:
7394  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7395  **/
7396 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7397 {
7398         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7399         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7400
7401         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7402                 ipr_cmd->job_step = ipr_set_supported_devs;
7403                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7404                                             struct ipr_resource_entry, queue);
7405                 return IPR_RC_JOB_CONTINUE;
7406         }
7407
7408         return ipr_reset_cmd_failed(ipr_cmd);
7409 }
7410
7411 /**
7412  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7413  * @ipr_cmd:    ipr command struct
7414  *
7415  * This function sends a Page 28 mode sense to the IOA to
7416  * retrieve SCSI bus attributes.
7417  *
7418  * Return value:
7419  *      IPR_RC_JOB_RETURN
7420  **/
7421 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7422 {
7423         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7424
7425         ENTER;
7426         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7427                              0x28, ioa_cfg->vpd_cbs_dma +
7428                              offsetof(struct ipr_misc_cbs, mode_pages),
7429                              sizeof(struct ipr_mode_pages));
7430
7431         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7432         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7433
7434         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7435
7436         LEAVE;
7437         return IPR_RC_JOB_RETURN;
7438 }
7439
7440 /**
7441  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7442  * @ipr_cmd:    ipr command struct
7443  *
7444  * This function enables dual IOA RAID support if possible.
7445  *
7446  * Return value:
7447  *      IPR_RC_JOB_RETURN
7448  **/
7449 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7450 {
7451         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7452         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7453         struct ipr_mode_page24 *mode_page;
7454         int length;
7455
7456         ENTER;
7457         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7458                                       sizeof(struct ipr_mode_page24));
7459
7460         if (mode_page)
7461                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7462
7463         length = mode_pages->hdr.length + 1;
7464         mode_pages->hdr.length = 0;
7465
7466         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7467                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7468                               length);
7469
7470         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7471         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7472
7473         LEAVE;
7474         return IPR_RC_JOB_RETURN;
7475 }
7476
7477 /**
7478  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7479  * @ipr_cmd:    ipr command struct
7480  *
7481  * This function handles the failure of a Mode Sense to the IOAFP.
7482  * Some adapters do not handle all mode pages.
7483  *
7484  * Return value:
7485  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7486  **/
7487 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7488 {
7489         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7490
7491         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7492                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7493                 return IPR_RC_JOB_CONTINUE;
7494         }
7495
7496         return ipr_reset_cmd_failed(ipr_cmd);
7497 }
7498
7499 /**
7500  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7501  * @ipr_cmd:    ipr command struct
7502  *
7503  * This function sends a mode sense to the IOA to retrieve
7504  * the IOA Advanced Function Control mode page.
7505  *
7506  * Return value:
7507  *      IPR_RC_JOB_RETURN
7508  **/
7509 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7510 {
7511         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7512
7513         ENTER;
7514         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7515                              0x24, ioa_cfg->vpd_cbs_dma +
7516                              offsetof(struct ipr_misc_cbs, mode_pages),
7517                              sizeof(struct ipr_mode_pages));
7518
7519         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7520         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7521
7522         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7523
7524         LEAVE;
7525         return IPR_RC_JOB_RETURN;
7526 }
7527
7528 /**
7529  * ipr_init_res_table - Initialize the resource table
7530  * @ipr_cmd:    ipr command struct
7531  *
7532  * This function looks through the existing resource table, comparing
7533  * it with the config table. This function will take care of old/new
7534  * devices and schedule adding/removing them from the mid-layer
7535  * as appropriate.
7536  *
7537  * Return value:
7538  *      IPR_RC_JOB_CONTINUE
7539  **/
7540 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7541 {
7542         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7543         struct ipr_resource_entry *res, *temp;
7544         struct ipr_config_table_entry_wrapper cfgtew;
7545         int entries, found, flag, i;
7546         LIST_HEAD(old_res);
7547
7548         ENTER;
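        /*
         * Reconcile in three passes: park every known resource on
         * old_res, let matching config table entries reclaim them,
         * then schedule the leftovers for mid-layer removal (if they
         * have a sdev) or return them to the free list.
         */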
7549         if (ioa_cfg->sis64)
7550                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7551         else
7552                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7553
7554         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7555                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7556
7557         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7558                 list_move_tail(&res->queue, &old_res);
7559
7560         if (ioa_cfg->sis64)
7561                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7562         else
7563                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7564
7565         for (i = 0; i < entries; i++) {
7566                 if (ioa_cfg->sis64)
7567                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7568                 else
7569                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7570                 found = 0;
7571
7572                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7573                         if (ipr_is_same_device(res, &cfgtew)) {
7574                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7575                                 found = 1;
7576                                 break;
7577                         }
7578                 }
7579
7580                 if (!found) {
7581                         if (list_empty(&ioa_cfg->free_res_q)) {
7582                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7583                                 break;
7584                         }
7585
7586                         found = 1;
7587                         res = list_entry(ioa_cfg->free_res_q.next,
7588                                          struct ipr_resource_entry, queue);
7589                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7590                         ipr_init_res_entry(res, &cfgtew);
7591                         res->add_to_ml = 1;
7592                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7593                         res->sdev->allow_restart = 1;
7594
7595                 if (found)
7596                         ipr_update_res_entry(res, &cfgtew);
7597         }
7598
7599         list_for_each_entry_safe(res, temp, &old_res, queue) {
7600                 if (res->sdev) {
7601                         res->del_from_ml = 1;
7602                         res->res_handle = IPR_INVALID_RES_HANDLE;
7603                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7604                 }
7605         }
7606
7607         list_for_each_entry_safe(res, temp, &old_res, queue) {
7608                 ipr_clear_res_target(res);
7609                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7610         }
7611
7612         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7613                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7614         else
7615                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7616
7617         LEAVE;
7618         return IPR_RC_JOB_CONTINUE;
7619 }
7620
7621 /**
7622  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7623  * @ipr_cmd:    ipr command struct
7624  *
7625  * This function sends a Query IOA Configuration command
7626  * to the adapter to retrieve the IOA configuration table.
7627  *
7628  * Return value:
7629  *      IPR_RC_JOB_RETURN
7630  **/
7631 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7632 {
7633         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7634         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7635         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7636         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7637
7638         ENTER;
7639         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7640                 ioa_cfg->dual_raid = 1;
7641         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7642                  ucode_vpd->major_release, ucode_vpd->card_type,
7643                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7644         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7645         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7646
7647         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7648         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7649         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7650         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7651
7652         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7653                        IPR_IOADL_FLAGS_READ_LAST);
7654
7655         ipr_cmd->job_step = ipr_init_res_table;
7656
7657         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7658
7659         LEAVE;
7660         return IPR_RC_JOB_RETURN;
7661 }
7662
7663 /**
7664  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7665  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry flags (CDB byte 1)
 * @page:       page code
 * @dma_addr:   DMA address of inquiry buffer
 * @xfer_len:   transfer data length
7666  *
7667  * This utility function sends an inquiry to the adapter.
7668  *
7669  * Return value:
7670  *      none
7671  **/
7672 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7673                               dma_addr_t dma_addr, u8 xfer_len)
7674 {
7675         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7676
7677         ENTER;
7678         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7679         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7680
7681         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7682         ioarcb->cmd_pkt.cdb[1] = flags;
7683         ioarcb->cmd_pkt.cdb[2] = page;
7684         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7685
7686         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7687
7688         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7689         LEAVE;
7690 }
7691
7692 /**
7693  * ipr_inquiry_page_supported - Is the given inquiry page supported
7694  * @page0:              inquiry page 0 buffer
7695  * @page:               page code.
7696  *
7697  * This function determines if the specified inquiry page is supported.
7698  *
7699  * Return value:
7700  *      1 if page is supported / 0 if not
7701  **/
7702 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7703 {
7704         int i;
7705
7706         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7707                 if (page0->page[i] == page)
7708                         return 1;
7709
7710         return 0;
7711 }
7712
7713 /**
7714  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7715  * @ipr_cmd:    ipr command struct
7716  *
7717  * This function sends a Page 0xD0 inquiry to the adapter
7718  * to retrieve adapter capabilities.
7719  *
7720  * Return value:
7721  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7722  **/
7723 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7724 {
7725         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7726         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7727         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7728
7729         ENTER;
7730         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7731         memset(cap, 0, sizeof(*cap));
7732
7733         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7734                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7735                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7736                                   sizeof(struct ipr_inquiry_cap));
7737                 return IPR_RC_JOB_RETURN;
7738         }
7739
7740         LEAVE;
7741         return IPR_RC_JOB_CONTINUE;
7742 }
7743
7744 /**
7745  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7746  * @ipr_cmd:    ipr command struct
7747  *
7748  * This function sends a Page 3 inquiry to the adapter
7749  * to retrieve software VPD information.
7750  *
7751  * Return value:
7752  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7753  **/
7754 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7755 {
7756         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7757
7758         ENTER;
7759
7760         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7761
7762         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7763                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7764                           sizeof(struct ipr_inquiry_page3));
7765
7766         LEAVE;
7767         return IPR_RC_JOB_RETURN;
7768 }
7769
7770 /**
7771  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7772  * @ipr_cmd:    ipr command struct
7773  *
7774  * This function sends a Page 0 inquiry to the adapter
7775  * to retrieve supported inquiry pages.
7776  *
7777  * Return value:
7778  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7779  **/
7780 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7781 {
7782         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7783         char type[5];
7784
7785         ENTER;
7786
7787         /* Grab the type out of the VPD and store it away */
7788         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7789         type[4] = '\0';
7790         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7791
7792         if (ipr_invalid_adapter(ioa_cfg)) {
7793                 dev_err(&ioa_cfg->pdev->dev,
7794                         "Adapter not supported in this hardware configuration.\n");
7795
7796                 if (!ipr_testmode) {
7797                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7798                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7799                         list_add_tail(&ipr_cmd->queue,
7800                                         &ioa_cfg->hrrq->hrrq_free_q);
7801                         return IPR_RC_JOB_RETURN;
7802                 }
7803         }
7804
7805         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7806
7807         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7808                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7809                           sizeof(struct ipr_inquiry_page0));
7810
7811         LEAVE;
7812         return IPR_RC_JOB_RETURN;
7813 }
7814
7815 /**
7816  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7817  * @ipr_cmd:    ipr command struct
7818  *
7819  * This function sends a standard inquiry to the adapter.
7820  *
7821  * Return value:
7822  *      IPR_RC_JOB_RETURN
7823  **/
7824 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7825 {
7826         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7827
7828         ENTER;
7829         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7830
7831         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7832                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7833                           sizeof(struct ipr_ioa_vpd));
7834
7835         LEAVE;
7836         return IPR_RC_JOB_RETURN;
7837 }
7838
7839 /**
7840  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7841  * @ipr_cmd:    ipr command struct
7842  *
7843  * This function sends an Identify Host Request Response Queue
7844  * command to establish the HRRQ with the adapter.
7845  *
7846  * Return value:
7847  *      IPR_RC_JOB_RETURN
7848  **/
7849 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7850 {
7851         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7852         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7853         struct ipr_hrr_queue *hrrq;
7854
7855         ENTER;
7856         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7857         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7858
7859         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7860                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7861
7862                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7863                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7864
7865                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7866                 if (ioa_cfg->sis64)
7867                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7868
7869                 if (ioa_cfg->nvectors == 1)
7870                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
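                /*
                 * With more than one interrupt vector, enable HRRQ
                 * selection so each queue is identified separately;
                 * cdb[9]/cdb[14] below carry the queue index.
                 */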
7871                 else
7872                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7873
7874                 ioarcb->cmd_pkt.cdb[2] =
7875                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7876                 ioarcb->cmd_pkt.cdb[3] =
7877                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7878                 ioarcb->cmd_pkt.cdb[4] =
7879                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7880                 ioarcb->cmd_pkt.cdb[5] =
7881                         ((u64) hrrq->host_rrq_dma) & 0xff;
7882                 ioarcb->cmd_pkt.cdb[7] =
7883                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7884                 ioarcb->cmd_pkt.cdb[8] =
7885                         (sizeof(u32) * hrrq->size) & 0xff;
7886
7887                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7888                         ioarcb->cmd_pkt.cdb[9] =
7889                                         ioa_cfg->identify_hrrq_index;
7890
7891                 if (ioa_cfg->sis64) {
7892                         ioarcb->cmd_pkt.cdb[10] =
7893                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7894                         ioarcb->cmd_pkt.cdb[11] =
7895                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7896                         ioarcb->cmd_pkt.cdb[12] =
7897                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7898                         ioarcb->cmd_pkt.cdb[13] =
7899                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7900                 }
7901
7902                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7903                         ioarcb->cmd_pkt.cdb[14] =
7904                                         ioa_cfg->identify_hrrq_index;
7905
7906                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7907                            IPR_INTERNAL_TIMEOUT);
7908
7909                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7910                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7911
7912                 LEAVE;
7913                 return IPR_RC_JOB_RETURN;
7914         }
7915
7916         LEAVE;
7917         return IPR_RC_JOB_CONTINUE;
7918 }
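
/*
 * Illustrative sketch only, not part of the driver: the Identify Host
 * RRQ CDB built above scatters the 64-bit HRRQ DMA address across CDB
 * bytes 2-5 (low word) and, on SIS-64, bytes 10-13 (high word), with
 * the queue size in bytes 7-8. Assuming <asm/unaligned.h> is available,
 * the same big-endian packing could be written as:
 */
static inline void ipr_example_pack_hrrq_cdb(u8 *cdb, u64 dma, u16 len)
{
        put_unaligned_be32(lower_32_bits(dma), &cdb[2]);  /* bytes 2-5 */
        put_unaligned_be16(len, &cdb[7]);                 /* bytes 7-8 */
        put_unaligned_be32(upper_32_bits(dma), &cdb[10]); /* bytes 10-13 */
}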
7919
7920 /**
7921  * ipr_reset_timer_done - Adapter reset timer function
7922  * @ipr_cmd:    ipr command struct
7923  *
7924  * Description: This function is used in adapter reset processing
7925  * for timing events. If the reset_cmd pointer in the IOA
7926  * config struct no longer points to this command, we are doing nested
7927  * resets and fail_all_ops will take care of freeing the
7928  * command block.
7929  *
7930  * Return value:
7931  *      none
7932  **/
7933 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7934 {
7935         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7936         unsigned long lock_flags = 0;
7937
7938         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7939
7940         if (ioa_cfg->reset_cmd == ipr_cmd) {
7941                 list_del(&ipr_cmd->queue);
7942                 ipr_cmd->done(ipr_cmd);
7943         }
7944
7945         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7946 }
7947
7948 /**
7949  * ipr_reset_start_timer - Start a timer for adapter reset job
7950  * @ipr_cmd:    ipr command struct
7951  * @timeout:    timeout value
7952  *
7953  * Description: This function is used in adapter reset processing
7954  * for timing events. If the reset_cmd pointer in the IOA
7955  * config struct no longer points to this command, we are doing nested
7956  * resets and fail_all_ops will take care of freeing the
7957  * command block.
7958  *
7959  * Return value:
7960  *      none
7961  **/
7962 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7963                                   unsigned long timeout)
7964 {
7966         ENTER;
7967         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7968         ipr_cmd->done = ipr_reset_ioa_job;
7969
7970         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7971         ipr_cmd->timer.expires = jiffies + timeout;
7972         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7973         add_timer(&ipr_cmd->timer);
7974 }
7975
7976 /**
7977  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7978  * @ioa_cfg:    ioa cfg struct
7979  *
7980  * Return value:
7981  *      nothing
7982  **/
7983 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7984 {
7985         struct ipr_hrr_queue *hrrq;
7986
7987         for_each_hrrq(hrrq, ioa_cfg) {
7988                 spin_lock(&hrrq->_lock);
7989                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7990
7991                 /* Initialize Host RRQ pointers */
7992                 hrrq->hrrq_start = hrrq->host_rrq;
7993                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7994                 hrrq->hrrq_curr = hrrq->hrrq_start;
7995                 hrrq->toggle_bit = 1;
7996                 spin_unlock(&hrrq->_lock);
7997         }
7998         wmb();
7999
8000         ioa_cfg->identify_hrrq_index = 0;
8001         if (ioa_cfg->hrrq_num == 1)
8002                 atomic_set(&ioa_cfg->hrrq_index, 0);
8003         else
8004                 atomic_set(&ioa_cfg->hrrq_index, 1);
8005
8006         /* Zero out config table */
8007         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8008 }
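
/*
 * Sketch (an assumption, mirroring the HRRQ consumption logic elsewhere
 * in this driver): toggle_bit lets the host spot new responses without a
 * producer index. The IOA stamps each response with the current toggle
 * value, and the host flips its expected value whenever hrrq_curr wraps
 * from hrrq_end back to hrrq_start, so entries left over from the
 * previous pass never look ready.
 */
static inline bool ipr_example_hrrq_entry_ready(struct ipr_hrr_queue *hrrq)
{
        return (be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
                hrrq->toggle_bit;
}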
8009
8010 /**
8011  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8012  * @ipr_cmd:    ipr command struct
8013  *
8014  * Return value:
8015  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8016  **/
8017 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8018 {
8019         unsigned long stage, stage_time;
8020         u32 feedback;
8021         volatile u32 int_reg;
8022         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8023         u64 maskval = 0;
8024
8025         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8026         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8027         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8028
8029         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8030
8031         /* sanity check the stage_time value */
8032         if (stage_time == 0)
8033                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8034         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8035                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8036         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8037                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8038
8039         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8040                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8041                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8042                 stage_time = ioa_cfg->transop_timeout;
8043                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8044         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8045                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8046                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8047                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8048                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8049                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8050                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8051                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8052                         return IPR_RC_JOB_CONTINUE;
8053                 }
8054         }
8055
8056         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8057         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8058         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8059         ipr_cmd->done = ipr_reset_ioa_job;
8060         add_timer(&ipr_cmd->timer);
8061
8062         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8063
8064         return IPR_RC_JOB_RETURN;
8065 }
8066
8067 /**
8068  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8069  * @ipr_cmd:    ipr command struct
8070  *
8071  * This function reinitializes some control blocks and
8072  * enables destructive diagnostics on the adapter.
8073  *
8074  * Return value:
8075  *      IPR_RC_JOB_RETURN
8076  **/
8077 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8078 {
8079         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8080         volatile u32 int_reg;
8081         volatile u64 maskval;
8082         int i;
8083
8084         ENTER;
8085         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8086         ipr_init_ioa_mem(ioa_cfg);
8087
8088         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8089                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8090                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8091                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8092         }
8093         wmb();
8094         if (ioa_cfg->sis64) {
8095                 /* Set the adapter to the correct endian mode. */
8096                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8097                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8098         }
8099
8100         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8101
8102         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8103                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8104                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8105                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8106                 return IPR_RC_JOB_CONTINUE;
8107         }
8108
8109         /* Enable destructive diagnostics on IOA */
8110         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8111
8112         if (ioa_cfg->sis64) {
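                /*
                 * Build the 64-bit clear mask: IPL stage change sits in
                 * the upper 32 bits, operational interrupts in the lower.
                 */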
8113                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8114                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8115                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8116         } else
8117                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8118
8119         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8120
8121         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8122
8123         if (ioa_cfg->sis64) {
8124                 ipr_cmd->job_step = ipr_reset_next_stage;
8125                 return IPR_RC_JOB_CONTINUE;
8126         }
8127
8128         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8129         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8130         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8131         ipr_cmd->done = ipr_reset_ioa_job;
8132         add_timer(&ipr_cmd->timer);
8133         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8134
8135         LEAVE;
8136         return IPR_RC_JOB_RETURN;
8137 }
8138
8139 /**
8140  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8141  * @ipr_cmd:    ipr command struct
8142  *
8143  * This function is invoked when an adapter dump has run out
8144  * of processing time.
8145  *
8146  * Return value:
8147  *      IPR_RC_JOB_CONTINUE
8148  **/
8149 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8150 {
8151         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8152
8153         if (ioa_cfg->sdt_state == GET_DUMP)
8154                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8155         else if (ioa_cfg->sdt_state == READ_DUMP)
8156                 ioa_cfg->sdt_state = ABORT_DUMP;
8157
8158         ioa_cfg->dump_timeout = 1;
8159         ipr_cmd->job_step = ipr_reset_alert;
8160
8161         return IPR_RC_JOB_CONTINUE;
8162 }
8163
8164 /**
8165  * ipr_unit_check_no_data - Log a unit check/no data error log
8166  * @ioa_cfg:            ioa config struct
8167  *
8168  * Logs an error indicating the adapter unit checked, but for some
8169  * reason, we were unable to fetch the unit check buffer.
8170  *
8171  * Return value:
8172  *      nothing
8173  **/
8174 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8175 {
8176         ioa_cfg->errors_logged++;
8177         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8178 }
8179
8180 /**
8181  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8182  * @ioa_cfg:            ioa config struct
8183  *
8184  * Fetches the unit check buffer from the adapter by clocking the data
8185  * through the mailbox register.
8186  *
8187  * Return value:
8188  *      nothing
8189  **/
8190 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8191 {
8192         unsigned long mailbox;
8193         struct ipr_hostrcb *hostrcb;
8194         struct ipr_uc_sdt sdt;
8195         int rc, length;
8196         u32 ioasc;
8197
8198         mailbox = readl(ioa_cfg->ioa_mailbox);
8199
8200         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8201                 ipr_unit_check_no_data(ioa_cfg);
8202                 return;
8203         }
8204
8205         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8206         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8207                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8208
8209         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8210             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8211             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8212                 ipr_unit_check_no_data(ioa_cfg);
8213                 return;
8214         }
8215
8216         /*
              * Find length of the first sdt entry (UC buffer). FMT3's
              * end_token is already a byte count; FMT2 carries start and
              * end addresses, so subtract and mask to get the length.
              */
8217         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8218                 length = be32_to_cpu(sdt.entry[0].end_token);
8219         else
8220                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8221                           be32_to_cpu(sdt.entry[0].start_token)) &
8222                           IPR_FMT2_MBX_ADDR_MASK;
8223
8224         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8225                              struct ipr_hostrcb, queue);
8226         list_del(&hostrcb->queue);
8227         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8228
8229         rc = ipr_get_ldump_data_section(ioa_cfg,
8230                                         be32_to_cpu(sdt.entry[0].start_token),
8231                                         (__be32 *)&hostrcb->hcam,
8232                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8233
8234         if (!rc) {
8235                 ipr_handle_log_data(ioa_cfg, hostrcb);
8236                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8237                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8238                     ioa_cfg->sdt_state == GET_DUMP)
8239                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8240         } else
8241                 ipr_unit_check_no_data(ioa_cfg);
8242
8243         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8244 }
8245
8246 /**
8247  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8248  * @ipr_cmd:    ipr command struct
8249  *
8250  * Description: This function fetches the unit check buffer from the adapter.
8251  *
8252  * Return value:
8253  *      IPR_RC_JOB_RETURN
8254  **/
8255 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8256 {
8257         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8258
8259         ENTER;
8260         ioa_cfg->ioa_unit_checked = 0;
8261         ipr_get_unit_check_buffer(ioa_cfg);
8262         ipr_cmd->job_step = ipr_reset_alert;
8263         ipr_reset_start_timer(ipr_cmd, 0);
8264
8265         LEAVE;
8266         return IPR_RC_JOB_RETURN;
8267 }
8268
8269 /**
8270  * ipr_reset_restore_cfg_space - Restore PCI config space.
8271  * @ipr_cmd:    ipr command struct
8272  *
8273  * Description: This function restores the saved PCI config space of
8274  * the adapter, fails all outstanding ops back to the callers, and
8275  * fetches the dump/unit check if applicable to this reset.
8276  *
8277  * Return value:
8278  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8279  **/
8280 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8281 {
8282         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8283         u32 int_reg;
8284
8285         ENTER;
8286         ioa_cfg->pdev->state_saved = true;
8287         pci_restore_state(ioa_cfg->pdev);
8288
8289         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8290                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8291                 return IPR_RC_JOB_CONTINUE;
8292         }
8293
8294         ipr_fail_all_ops(ioa_cfg);
8295
8296         if (ioa_cfg->sis64) {
8297                 /* Set the adapter to the correct endian mode. */
8298                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8299                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8300         }
8301
8302         if (ioa_cfg->ioa_unit_checked) {
8303                 if (ioa_cfg->sis64) {
8304                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8305                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8306                         return IPR_RC_JOB_RETURN;
8307                 } else {
8308                         ioa_cfg->ioa_unit_checked = 0;
8309                         ipr_get_unit_check_buffer(ioa_cfg);
8310                         ipr_cmd->job_step = ipr_reset_alert;
8311                         ipr_reset_start_timer(ipr_cmd, 0);
8312                         return IPR_RC_JOB_RETURN;
8313                 }
8314         }
8315
8316         if (ioa_cfg->in_ioa_bringdown) {
8317                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8318         } else {
8319                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8320
8321                 if (GET_DUMP == ioa_cfg->sdt_state) {
8322                         ioa_cfg->sdt_state = READ_DUMP;
8323                         ioa_cfg->dump_timeout = 0;
8324                         if (ioa_cfg->sis64)
8325                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8326                         else
8327                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8328                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8329                         schedule_work(&ioa_cfg->work_q);
8330                         return IPR_RC_JOB_RETURN;
8331                 }
8332         }
8333
8334         LEAVE;
8335         return IPR_RC_JOB_CONTINUE;
8336 }
8337
8338 /**
8339  * ipr_reset_bist_done - BIST has completed on the adapter.
8340  * @ipr_cmd:    ipr command struct
8341  *
8342  * Description: Unblock config space and resume the reset process.
8343  *
8344  * Return value:
8345  *      IPR_RC_JOB_CONTINUE
8346  **/
8347 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8348 {
8349         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8350
8351         ENTER;
8352         if (ioa_cfg->cfg_locked)
8353                 pci_cfg_access_unlock(ioa_cfg->pdev);
8354         ioa_cfg->cfg_locked = 0;
8355         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8356         LEAVE;
8357         return IPR_RC_JOB_CONTINUE;
8358 }
8359
8360 /**
8361  * ipr_reset_start_bist - Run BIST on the adapter.
8362  * @ipr_cmd:    ipr command struct
8363  *
8364  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8365  *
8366  * Return value:
8367  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8368  **/
8369 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8370 {
8371         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8372         int rc = PCIBIOS_SUCCESSFUL;
8373
8374         ENTER;
8375         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8376                 writel(IPR_UPROCI_SIS64_START_BIST,
8377                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8378         else
8379                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8380
8381         if (rc == PCIBIOS_SUCCESSFUL) {
8382                 ipr_cmd->job_step = ipr_reset_bist_done;
8383                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8384                 rc = IPR_RC_JOB_RETURN;
8385         } else {
8386                 if (ioa_cfg->cfg_locked)
8387                         pci_cfg_access_unlock(ioa_cfg->pdev);
8388                 ioa_cfg->cfg_locked = 0;
8389                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8390                 rc = IPR_RC_JOB_CONTINUE;
8391         }
8392
8393         LEAVE;
8394         return rc;
8395 }
8396
8397 /**
8398  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8399  * @ipr_cmd:    ipr command struct
8400  *
8401  * Description: This delays two seconds after the PCI reset pulse completes.
8402  *
8403  * Return value:
8404  *      IPR_RC_JOB_RETURN
8405  **/
8406 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8407 {
8408         ENTER;
8409         ipr_cmd->job_step = ipr_reset_bist_done;
8410         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8411         LEAVE;
8412         return IPR_RC_JOB_RETURN;
8413 }
8414
8415 /**
8416  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8417  * @work:       work struct
8418  *
8419  * Description: This pulses warm reset to a slot.
8420  *
8421  **/
8422 static void ipr_reset_reset_work(struct work_struct *work)
8423 {
8424         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8425         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8426         struct pci_dev *pdev = ioa_cfg->pdev;
8427         unsigned long lock_flags = 0;
8428
8429         ENTER;
8430         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8431         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8432         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8433
8434         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8435         if (ioa_cfg->reset_cmd == ipr_cmd)
8436                 ipr_reset_ioa_job(ipr_cmd);
8437         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8438         LEAVE;
8439 }
8440
8441 /**
8442  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8443  * @ipr_cmd:    ipr command struct
8444  *
8445  * Description: This queues work to pulse PCI reset to the adapter.
8446  *
8447  * Return value:
8448  *      IPR_RC_JOB_RETURN
8449  **/
8450 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8451 {
8452         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8453
8454         ENTER;
8455         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8456         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8457         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8458         LEAVE;
8459         return IPR_RC_JOB_RETURN;
8460 }
8461
8462 /**
8463  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8464  * @ipr_cmd:    ipr command struct
8465  *
8466  * Description: This attempts to block config access to the IOA.
8467  *
8468  * Return value:
8469  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8470  **/
8471 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8472 {
8473         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8474         int rc = IPR_RC_JOB_CONTINUE;
8475
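	/*
	 * Poll pci_cfg_access_trylock() every IPR_CHECK_FOR_RESET_TIMEOUT
	 * until it succeeds or u.time_left is exhausted, in which case the
	 * reset proceeds without the config access lock.
	 */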
8476         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8477                 ioa_cfg->cfg_locked = 1;
8478                 ipr_cmd->job_step = ioa_cfg->reset;
8479         } else {
8480                 if (ipr_cmd->u.time_left) {
8481                         rc = IPR_RC_JOB_RETURN;
8482                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8483                         ipr_reset_start_timer(ipr_cmd,
8484                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8485                 } else {
8486                         ipr_cmd->job_step = ioa_cfg->reset;
8487                         dev_err(&ioa_cfg->pdev->dev,
8488                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8489                 }
8490         }
8491
8492         return rc;
8493 }
8494
8495 /**
8496  * ipr_reset_block_config_access - Block config access to the IOA
8497  * @ipr_cmd:    ipr command struct
8498  *
8499  * Description: This attempts to block config access to the IOA
8500  *
8501  * Return value:
8502  *      IPR_RC_JOB_CONTINUE
8503  **/
8504 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8505 {
8506         ipr_cmd->ioa_cfg->cfg_locked = 0;
8507         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8508         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8509         return IPR_RC_JOB_CONTINUE;
8510 }
8511
8512 /**
8513  * ipr_reset_allowed - Query whether or not IOA can be reset
8514  * @ioa_cfg:    ioa config struct
8515  *
8516  * Return value:
8517  *      0 if reset not allowed / non-zero if reset is allowed
8518  **/
8519 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8520 {
8521         volatile u32 temp_reg;
8522
8523         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8524         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8525 }
8526
8527 /**
8528  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8529  * @ipr_cmd:    ipr command struct
8530  *
8531  * Description: This function waits for the adapter's permission to run
8532  * BIST, then runs BIST. If the adapter does not grant permission within
8533  * a reasonable time, we reset the adapter anyway. The impact of
8534  * resetting the adapter without warning it is the risk of losing the
8535  * persistent error log on the adapter: if the adapter is reset while it
8536  * is writing to its flash, the affected flash segment will have bad ECC
8537  * and be zeroed.
8538  *
8539  * Return value:
8540  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8541  **/
8542 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8543 {
8544         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8545         int rc = IPR_RC_JOB_RETURN;
8546
8547         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8548                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8549                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8550         } else {
8551                 ipr_cmd->job_step = ipr_reset_block_config_access;
8552                 rc = IPR_RC_JOB_CONTINUE;
8553         }
8554
8555         return rc;
8556 }
8557
8558 /**
8559  * ipr_reset_alert - Alert the adapter of a pending reset
8560  * @ipr_cmd:    ipr command struct
8561  *
8562  * Description: This function alerts the adapter that it will be reset.
8563  * If memory space is not currently enabled, proceed directly
8564  * to running BIST on the adapter. The timer must always be started
8565  * so we guarantee we do not run BIST from ipr_isr.
8566  *
8567  * Return value:
8568  *      IPR_RC_JOB_RETURN
8569  **/
8570 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8571 {
8572         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8573         u16 cmd_reg;
8574         int rc;
8575
8576         ENTER;
8577         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8578
8579         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8580                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8581                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8582                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8583         } else {
8584                 ipr_cmd->job_step = ipr_reset_block_config_access;
8585         }
8586
8587         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8588         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8589
8590         LEAVE;
8591         return IPR_RC_JOB_RETURN;
8592 }
8593
8594 /**
8595  * ipr_reset_quiesce_done - Complete IOA disconnect
8596  * @ipr_cmd:    ipr command struct
8597  *
8598  * Description: Freeze the adapter to complete quiesce processing
8599  *
8600  * Return value:
8601  *      IPR_RC_JOB_CONTINUE
8602  **/
8603 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8604 {
8605         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8606
8607         ENTER;
8608         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8609         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8610         LEAVE;
8611         return IPR_RC_JOB_CONTINUE;
8612 }
8613
8614 /**
8615  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8616  * @ipr_cmd:    ipr command struct
8617  *
8618  * Description: Ensure nothing is outstanding to the IOA and
8619  * proceed with IOA disconnect. Otherwise reset the IOA.
8620  *
8621  * Return value:
8622  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8623  **/
8624 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8625 {
8626         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8627         struct ipr_cmnd *loop_cmd;
8628         struct ipr_hrr_queue *hrrq;
8629         int rc = IPR_RC_JOB_CONTINUE;
8630         int count = 0;
8631
8632         ENTER;
8633         ipr_cmd->job_step = ipr_reset_quiesce_done;
8634
8635         for_each_hrrq(hrrq, ioa_cfg) {
8636                 spin_lock(&hrrq->_lock);
8637                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8638                         count++;
8639                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8640                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8641                         rc = IPR_RC_JOB_RETURN;
8642                         break;
8643                 }
8644                 spin_unlock(&hrrq->_lock);
8645
8646                 if (count)
8647                         break;
8648         }
8649
8650         LEAVE;
8651         return rc;
8652 }
8653
8654 /**
8655  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8656  * @ipr_cmd:    ipr command struct
8657  *
8658  * Description: Cancel any outstanding HCAMs to the IOA.
8659  *
8660  * Return value:
8661  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8662  **/
8663 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8664 {
8665         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8666         int rc = IPR_RC_JOB_CONTINUE;
8667         struct ipr_cmd_pkt *cmd_pkt;
8668         struct ipr_cmnd *hcam_cmd;
8669         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8670
8671         ENTER;
8672         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8673
8674         if (!hrrq->ioa_is_dead) {
8675                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8676                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8677                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8678                                         continue;
8679
8680                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8681                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8682                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8684                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8685                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
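				/*
				 * The 64-bit host DMA address of the HCAM
				 * command block is split across the CDB:
				 * bytes 10-13 carry the upper 32 bits and
				 * bytes 2-5 the lower 32 bits, MSB first;
				 * equivalent to:
				 *   put_unaligned_be32(addr >> 32, &cdb[10]);
				 *   put_unaligned_be32(addr, &cdb[2]);
				 */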
8686                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8687                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8688                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8689                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8690                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8691                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8692                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8693                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8694
8695                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8696                                            IPR_CANCEL_TIMEOUT);
8697
8698                                 rc = IPR_RC_JOB_RETURN;
8699                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8700                                 break;
8701                         }
8702                 }
8703         } else
8704                 ipr_cmd->job_step = ipr_reset_alert;
8705
8706         LEAVE;
8707         return rc;
8708 }
8709
8710 /**
8711  * ipr_reset_ucode_download_done - Microcode download completion
8712  * @ipr_cmd:    ipr command struct
8713  *
8714  * Description: This function unmaps the microcode download buffer.
8715  *
8716  * Return value:
8717  *      IPR_RC_JOB_CONTINUE
8718  **/
8719 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8720 {
8721         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8722         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8723
8724         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8725                      sglist->num_sg, DMA_TO_DEVICE);
8726
8727         ipr_cmd->job_step = ipr_reset_alert;
8728         return IPR_RC_JOB_CONTINUE;
8729 }
8730
8731 /**
8732  * ipr_reset_ucode_download - Download microcode to the adapter
8733  * @ipr_cmd:    ipr command struct
8734  *
8735  * Description: This function checks to see if it there is microcode
8736  * to download to the adapter. If there is, a download is performed.
8737  *
8738  * Return value:
8739  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8740  **/
8741 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8742 {
8743         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8744         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8745
8746         ENTER;
8747         ipr_cmd->job_step = ipr_reset_alert;
8748
8749         if (!sglist)
8750                 return IPR_RC_JOB_CONTINUE;
8751
8752         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8753         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8754         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8755         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
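	/* CDB bytes 6-8 carry the 24-bit microcode transfer length, MSB first */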
8756         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8757         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8758         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8759
8760         if (ioa_cfg->sis64)
8761                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8762         else
8763                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8764         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8765
8766         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8767                    IPR_WRITE_BUFFER_TIMEOUT);
8768
8769         LEAVE;
8770         return IPR_RC_JOB_RETURN;
8771 }
8772
8773 /**
8774  * ipr_reset_shutdown_ioa - Shutdown the adapter
8775  * @ipr_cmd:    ipr command struct
8776  *
8777  * Description: This function issues an adapter shutdown of the
8778  * specified type to the specified adapter as part of the
8779  * adapter reset job.
8780  *
8781  * Return value:
8782  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8783  **/
8784 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8785 {
8786         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8787         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8788         unsigned long timeout;
8789         int rc = IPR_RC_JOB_CONTINUE;
8790
8791         ENTER;
8792         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8793                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8794         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8795                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8796                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8797                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8798                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8799                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8800
8801                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8802                         timeout = IPR_SHUTDOWN_TIMEOUT;
8803                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8804                         timeout = IPR_INTERNAL_TIMEOUT;
8805                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8806                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8807                 else
8808                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8809
8810                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8811
8812                 rc = IPR_RC_JOB_RETURN;
8813                 ipr_cmd->job_step = ipr_reset_ucode_download;
8814         } else
8815                 ipr_cmd->job_step = ipr_reset_alert;
8816
8817         LEAVE;
8818         return rc;
8819 }
8820
8821 /**
8822  * ipr_reset_ioa_job - Adapter reset job
8823  * @ipr_cmd:    ipr command struct
8824  *
8825  * Description: This function is the job router for the adapter reset job.
8826  *
8827  * Return value:
8828  *      none
8829  **/
8830 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8831 {
8832         u32 rc, ioasc;
8833         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8834
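	/*
	 * Each job_step either returns IPR_RC_JOB_CONTINUE, in which case
	 * the next step runs synchronously in this loop, or IPR_RC_JOB_RETURN
	 * once it has queued asynchronous work (a timer, an IOA command, or a
	 * workqueue item) that will re-enter this router on completion.
	 */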
8835         do {
8836                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8837
8838                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8839                         /*
8840                          * We are doing nested adapter resets and this is
8841                          * not the current reset job.
8842                          */
8843                         list_add_tail(&ipr_cmd->queue,
8844                                         &ipr_cmd->hrrq->hrrq_free_q);
8845                         return;
8846                 }
8847
8848                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8849                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8850                         if (rc == IPR_RC_JOB_RETURN)
8851                                 return;
8852                 }
8853
8854                 ipr_reinit_ipr_cmnd(ipr_cmd);
8855                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8856                 rc = ipr_cmd->job_step(ipr_cmd);
8857         } while (rc == IPR_RC_JOB_CONTINUE);
8858 }
8859
8860 /**
8861  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8862  * @ioa_cfg:            ioa config struct
8863  * @job_step:           first job step of reset job
8864  * @shutdown_type:      shutdown type
8865  *
8866  * Description: This function will initiate the reset of the given adapter
8867  * starting at the selected job step.
8868  * If the caller needs to wait on the completion of the reset,
8869  * the caller must sleep on the reset_wait_q.
8870  *
8871  * Return value:
8872  *      none
8873  **/
8874 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8875                                     int (*job_step) (struct ipr_cmnd *),
8876                                     enum ipr_shutdown_type shutdown_type)
8877 {
8878         struct ipr_cmnd *ipr_cmd;
8879         int i;
8880
8881         ioa_cfg->in_reset_reload = 1;
8882         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8883                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8884                 ioa_cfg->hrrq[i].allow_cmds = 0;
8885                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8886         }
8887         wmb();
8888         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8889                 scsi_block_requests(ioa_cfg->host);
8890
8891         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8892         ioa_cfg->reset_cmd = ipr_cmd;
8893         ipr_cmd->job_step = job_step;
8894         ipr_cmd->u.shutdown_type = shutdown_type;
8895
8896         ipr_reset_ioa_job(ipr_cmd);
8897 }
8898
8899 /**
8900  * ipr_initiate_ioa_reset - Initiate an adapter reset
8901  * @ioa_cfg:            ioa config struct
8902  * @shutdown_type:      shutdown type
8903  *
8904  * Description: This function will initiate the reset of the given adapter.
8905  * If the caller needs to wait on the completion of the reset,
8906  * the caller must sleep on the reset_wait_q.
8907  *
8908  * Return value:
8909  *      none
8910  **/
8911 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8912                                    enum ipr_shutdown_type shutdown_type)
8913 {
8914         int i;
8915
8916         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8917                 return;
8918
8919         if (ioa_cfg->in_reset_reload) {
8920                 if (ioa_cfg->sdt_state == GET_DUMP)
8921                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8922                 else if (ioa_cfg->sdt_state == READ_DUMP)
8923                         ioa_cfg->sdt_state = ABORT_DUMP;
8924         }
8925
8926         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8927                 dev_err(&ioa_cfg->pdev->dev,
8928                         "IOA taken offline - error recovery failed\n");
8929
8930                 ioa_cfg->reset_retries = 0;
8931                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8932                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8933                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8934                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8935                 }
8936                 wmb();
8937
8938                 if (ioa_cfg->in_ioa_bringdown) {
8939                         ioa_cfg->reset_cmd = NULL;
8940                         ioa_cfg->in_reset_reload = 0;
8941                         ipr_fail_all_ops(ioa_cfg);
8942                         wake_up_all(&ioa_cfg->reset_wait_q);
8943
8944                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8945                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8946                                 scsi_unblock_requests(ioa_cfg->host);
8947                                 spin_lock_irq(ioa_cfg->host->host_lock);
8948                         }
8949                         return;
8950                 } else {
8951                         ioa_cfg->in_ioa_bringdown = 1;
8952                         shutdown_type = IPR_SHUTDOWN_NONE;
8953                 }
8954         }
8955
8956         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8957                                 shutdown_type);
8958 }
8959
8960 /**
8961  * ipr_reset_freeze - Hold off all I/O activity
8962  * @ipr_cmd:    ipr command struct
8963  *
8964  * Description: If the PCI slot is frozen, hold off all I/O
8965  * activity; then, as soon as the slot is available again,
8966  * initiate an adapter reset.
8967  **/
8968 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8969 {
8970         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8971         int i;
8972
8973         /* Disallow new interrupts, avoid loop */
8974         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8975                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8976                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8977                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8978         }
8979         wmb();
8980         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8981         ipr_cmd->done = ipr_reset_ioa_job;
8982         return IPR_RC_JOB_RETURN;
8983 }
8984
8985 /**
8986  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8987  * @pdev:       PCI device struct
8988  *
8989  * Description: This routine is called to tell us that the MMIO
8990  * access to the IOA has been restored
8991  */
8992 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8993 {
8994         unsigned long flags = 0;
8995         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8996
8997         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8998         if (!ioa_cfg->probe_done)
8999                 pci_save_state(pdev);
9000         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9001         return PCI_ERS_RESULT_NEED_RESET;
9002 }
9003
9004 /**
9005  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9006  * @pdev:       PCI device struct
9007  *
9008  * Description: This routine is called to tell us that the PCI bus
9009  * is down. Can't do anything here, except put the device driver
9010  * into a holding pattern, waiting for the PCI bus to come back.
9011  */
9012 static void ipr_pci_frozen(struct pci_dev *pdev)
9013 {
9014         unsigned long flags = 0;
9015         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9016
9017         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9018         if (ioa_cfg->probe_done)
9019                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9020         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9021 }
9022
9023 /**
9024  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9025  * @pdev:       PCI device struct
9026  *
9027  * Description: This routine is called by the pci error recovery
9028  * code after the PCI slot has been reset, just before we
9029  * should resume normal operations.
9030  */
9031 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9032 {
9033         unsigned long flags = 0;
9034         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9035
9036         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9037         if (ioa_cfg->probe_done) {
9038                 if (ioa_cfg->needs_warm_reset)
9039                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9040                 else
9041                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9042                                                 IPR_SHUTDOWN_NONE);
9043         } else
9044                 wake_up_all(&ioa_cfg->eeh_wait_q);
9045         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9046         return PCI_ERS_RESULT_RECOVERED;
9047 }
9048
9049 /**
9050  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9051  * @pdev:       PCI device struct
9052  *
9053  * Description: This routine is called when the PCI bus has
9054  * permanently failed.
9055  */
9056 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9057 {
9058         unsigned long flags = 0;
9059         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9060         int i;
9061
9062         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9063         if (ioa_cfg->probe_done) {
9064                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9065                         ioa_cfg->sdt_state = ABORT_DUMP;
9066                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9067                 ioa_cfg->in_ioa_bringdown = 1;
9068                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9069                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9070                         ioa_cfg->hrrq[i].allow_cmds = 0;
9071                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9072                 }
9073                 wmb();
9074                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9075         } else
9076                 wake_up_all(&ioa_cfg->eeh_wait_q);
9077         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9078 }
9079
9080 /**
9081  * ipr_pci_error_detected - Called when a PCI error is detected.
9082  * @pdev:       PCI device struct
9083  * @state:      PCI channel state
9084  *
9085  * Description: Called when a PCI error is detected.
9086  *
9087  * Return value:
9088  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9089  */
9090 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9091                                                pci_channel_state_t state)
9092 {
9093         switch (state) {
9094         case pci_channel_io_frozen:
9095                 ipr_pci_frozen(pdev);
9096                 return PCI_ERS_RESULT_CAN_RECOVER;
9097         case pci_channel_io_perm_failure:
9098                 ipr_pci_perm_failure(pdev);
9099                 return PCI_ERS_RESULT_DISCONNECT;
9101         default:
9102                 break;
9103         }
9104         return PCI_ERS_RESULT_NEED_RESET;
9105 }
9106
9107 /**
9108  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9109  * @ioa_cfg:    ioa cfg struct
9110  *
9111  * Description: This is the second phase of adapter initialization.
9112  * This function takes care of initializing the adapter to the point
9113  * where it can accept new commands.
9114  *
9115  * Return value:
9116  *      0 on success / -EIO on failure
9117  **/
9118 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9119 {
9120         int rc = 0;
9121         unsigned long host_lock_flags = 0;
9122
9123         ENTER;
9124         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9125         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9126         ioa_cfg->probe_done = 1;
9127         if (ioa_cfg->needs_hard_reset) {
9128                 ioa_cfg->needs_hard_reset = 0;
9129                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9130         } else
9131                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9132                                         IPR_SHUTDOWN_NONE);
9133         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9134
9135         LEAVE;
9136         return rc;
9137 }
9138
9139 /**
9140  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9141  * @ioa_cfg:    ioa config struct
9142  *
9143  * Return value:
9144  *      none
9145  **/
9146 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9147 {
9148         int i;
9149
9150         if (ioa_cfg->ipr_cmnd_list) {
9151                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9152                         if (ioa_cfg->ipr_cmnd_list[i])
9153                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9154                                               ioa_cfg->ipr_cmnd_list[i],
9155                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9156
9157                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9158                 }
9159         }
9160
9161         if (ioa_cfg->ipr_cmd_pool)
9162                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9163
9164         kfree(ioa_cfg->ipr_cmnd_list);
9165         kfree(ioa_cfg->ipr_cmnd_list_dma);
9166         ioa_cfg->ipr_cmnd_list = NULL;
9167         ioa_cfg->ipr_cmnd_list_dma = NULL;
9168         ioa_cfg->ipr_cmd_pool = NULL;
9169 }
9170
9171 /**
9172  * ipr_free_mem - Frees memory allocated for an adapter
9173  * @ioa_cfg:    ioa cfg struct
9174  *
9175  * Return value:
9176  *      nothing
9177  **/
9178 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9179 {
9180         int i;
9181
9182         kfree(ioa_cfg->res_entries);
9183         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9184                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9185         ipr_free_cmd_blks(ioa_cfg);
9186
9187         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9188                 dma_free_coherent(&ioa_cfg->pdev->dev,
9189                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9190                                   ioa_cfg->hrrq[i].host_rrq,
9191                                   ioa_cfg->hrrq[i].host_rrq_dma);
9192
9193         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9194                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9195
9196         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9197                 dma_free_coherent(&ioa_cfg->pdev->dev,
9198                                   sizeof(struct ipr_hostrcb),
9199                                   ioa_cfg->hostrcb[i],
9200                                   ioa_cfg->hostrcb_dma[i]);
9201         }
9202
9203         ipr_free_dump(ioa_cfg);
9204         kfree(ioa_cfg->trace);
9205 }
9206
9207 /**
9208  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9209  * @ioa_cfg:    ipr cfg struct
9210  *
9211  * This function frees all allocated IRQs for the
9212  * specified adapter.
9213  *
9214  * Return value:
9215  *      none
9216  **/
9217 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9218 {
9219         struct pci_dev *pdev = ioa_cfg->pdev;
9220
9221         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9222             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9223                 int i;
9224                 for (i = 0; i < ioa_cfg->nvectors; i++)
9225                         free_irq(ioa_cfg->vectors_info[i].vec,
9226                                  &ioa_cfg->hrrq[i]);
9227         } else
9228                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9229
9230         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9231                 pci_disable_msi(pdev);
9232                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9233         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9234                 pci_disable_msix(pdev);
9235                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9236         }
9237 }
9238
9239 /**
9240  * ipr_free_all_resources - Free all allocated resources for an adapter.
9241  * @ioa_cfg:    ioa config struct
9242  *
9243  * This function frees all allocated resources for the
9244  * specified adapter.
9245  *
9246  * Return value:
9247  *      none
9248  **/
9249 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9250 {
9251         struct pci_dev *pdev = ioa_cfg->pdev;
9252
9253         ENTER;
9254         ipr_free_irqs(ioa_cfg);
9255         if (ioa_cfg->reset_work_q)
9256                 destroy_workqueue(ioa_cfg->reset_work_q);
9257         iounmap(ioa_cfg->hdw_dma_regs);
9258         pci_release_regions(pdev);
9259         ipr_free_mem(ioa_cfg);
9260         scsi_host_put(ioa_cfg->host);
9261         pci_disable_device(pdev);
9262         LEAVE;
9263 }
9264
9265 /**
9266  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9267  * @ioa_cfg:    ioa config struct
9268  *
9269  * Return value:
9270  *      0 on success / -ENOMEM on allocation failure
9271  **/
9272 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9273 {
9274         struct ipr_cmnd *ipr_cmd;
9275         struct ipr_ioarcb *ioarcb;
9276         dma_addr_t dma_addr;
9277         int i, entries_each_hrrq, hrrq_id = 0;
9278
9279         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9280                                                 sizeof(struct ipr_cmnd), 512, 0);
9281
9282         if (!ioa_cfg->ipr_cmd_pool)
9283                 return -ENOMEM;
9284
9285         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9286         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9287
9288         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9289                 ipr_free_cmd_blks(ioa_cfg);
9290                 return -ENOMEM;
9291         }
9292
9293         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9294                 if (ioa_cfg->hrrq_num > 1) {
9295                         if (i == 0) {
9296                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9297                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9298                                 ioa_cfg->hrrq[i].max_cmd_id =
9299                                         (entries_each_hrrq - 1);
9300                         } else {
9301                                 entries_each_hrrq =
9302                                         IPR_NUM_BASE_CMD_BLKS/
9303                                         (ioa_cfg->hrrq_num - 1);
9304                                 ioa_cfg->hrrq[i].min_cmd_id =
9305                                         IPR_NUM_INTERNAL_CMD_BLKS +
9306                                         (i - 1) * entries_each_hrrq;
9307                                 ioa_cfg->hrrq[i].max_cmd_id =
9308                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9309                                         i * entries_each_hrrq - 1);
9310                         }
9311                 } else {
9312                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9313                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9314                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9315                 }
9316                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9317         }
9318
9319         BUG_ON(ioa_cfg->hrrq_num == 0);
9320
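	/*
	 * The integer division above may leave a remainder; fold any
	 * unassigned command blocks into the last HRRQ so that all
	 * IPR_NUM_CMD_BLKS commands belong to a queue.
	 */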
9321         i = IPR_NUM_CMD_BLKS -
9322                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9323         if (i > 0) {
9324                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9325                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9326         }
9327
9328         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9329                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9330
9331                 if (!ipr_cmd) {
9332                         ipr_free_cmd_blks(ioa_cfg);
9333                         return -ENOMEM;
9334                 }
9335
9336                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9337                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9338                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9339
9340                 ioarcb = &ipr_cmd->ioarcb;
9341                 ipr_cmd->dma_addr = dma_addr;
9342                 if (ioa_cfg->sis64)
9343                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9344                 else
9345                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9346
9347                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9348                 if (ioa_cfg->sis64) {
9349                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9350                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9351                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9352                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9353                 } else {
9354                         ioarcb->write_ioadl_addr =
9355                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9356                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9357                         ioarcb->ioasa_host_pci_addr =
9358                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9359                 }
9360                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9361                 ipr_cmd->cmd_index = i;
9362                 ipr_cmd->ioa_cfg = ioa_cfg;
9363                 ipr_cmd->sense_buffer_dma = dma_addr +
9364                         offsetof(struct ipr_cmnd, sense_buffer);
9365
9366                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9367                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9368                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9369                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9370                         hrrq_id++;
9371         }
9372
9373         return 0;
9374 }
9375
9376 /**
9377  * ipr_alloc_mem - Allocate memory for an adapter
9378  * @ioa_cfg:    ioa config struct
9379  *
9380  * Return value:
9381  *      0 on success / non-zero for error
9382  **/
9383 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9384 {
9385         struct pci_dev *pdev = ioa_cfg->pdev;
9386         int i, rc = -ENOMEM;
9387
9388         ENTER;
9389         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9390                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9391
9392         if (!ioa_cfg->res_entries)
9393                 goto out;
9394
9395         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9396                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9397                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9398         }
9399
9400         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9401                                               sizeof(struct ipr_misc_cbs),
9402                                               &ioa_cfg->vpd_cbs_dma,
9403                                               GFP_KERNEL);
9404
9405         if (!ioa_cfg->vpd_cbs)
9406                 goto out_free_res_entries;
9407
9408         if (ipr_alloc_cmd_blks(ioa_cfg))
9409                 goto out_free_vpd_cbs;
9410
9411         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9412                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9413                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9414                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9415                                         GFP_KERNEL);
9416
9417                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9418                         while (--i >= 0)
9419                                 dma_free_coherent(&pdev->dev,
9420                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9421                                         ioa_cfg->hrrq[i].host_rrq,
9422                                         ioa_cfg->hrrq[i].host_rrq_dma);
9423                         goto out_ipr_free_cmd_blocks;
9424                 }
9425                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9426         }
9427
9428         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9429                                                   ioa_cfg->cfg_table_size,
9430                                                   &ioa_cfg->cfg_table_dma,
9431                                                   GFP_KERNEL);
9432
9433         if (!ioa_cfg->u.cfg_table)
9434                 goto out_free_host_rrq;
9435
9436         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9437                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9438                                                          sizeof(struct ipr_hostrcb),
9439                                                          &ioa_cfg->hostrcb_dma[i],
9440                                                          GFP_KERNEL);
9441
9442                 if (!ioa_cfg->hostrcb[i])
9443                         goto out_free_hostrcb_dma;
9444
9445                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9446                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9447                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9448                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9449         }
9450
9451         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9452                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9453
9454         if (!ioa_cfg->trace)
9455                 goto out_free_hostrcb_dma;
9456
9457         rc = 0;
9458 out:
9459         LEAVE;
9460         return rc;
9461
9462 out_free_hostrcb_dma:
9463         while (i-- > 0) {
9464                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9465                                   ioa_cfg->hostrcb[i],
9466                                   ioa_cfg->hostrcb_dma[i]);
9467         }
9468         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9469                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9470 out_free_host_rrq:
9471         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9472                 dma_free_coherent(&pdev->dev,
9473                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9474                                   ioa_cfg->hrrq[i].host_rrq,
9475                                   ioa_cfg->hrrq[i].host_rrq_dma);
9476         }
9477 out_ipr_free_cmd_blocks:
9478         ipr_free_cmd_blks(ioa_cfg);
9479 out_free_vpd_cbs:
9480         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9481                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9482 out_free_res_entries:
9483         kfree(ioa_cfg->res_entries);
9484         goto out;
9485 }
9486
9487 /**
9488  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9489  * @ioa_cfg:    ioa config struct
9490  *
9491  * Return value:
9492  *      none
9493  **/
9494 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9495 {
9496         int i;
9497
9498         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9499                 ioa_cfg->bus_attr[i].bus = i;
9500                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9501                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9502                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9503                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9504                 else
9505                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9506         }
9507 }
9508
9509 /**
9510  * ipr_init_regs - Initialize IOA registers
9511  * @ioa_cfg:    ioa config struct
9512  *
9513  * Return value:
9514  *      none
9515  **/
9516 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9517 {
9518         const struct ipr_interrupt_offsets *p;
9519         struct ipr_interrupts *t;
9520         void __iomem *base;
9521
9522         p = &ioa_cfg->chip_cfg->regs;
9523         t = &ioa_cfg->regs;
9524         base = ioa_cfg->hdw_dma_regs;
9525
9526         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9527         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9528         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9529         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9530         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9531         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9532         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9533         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9534         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9535         t->ioarrin_reg = base + p->ioarrin_reg;
9536         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9537         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9538         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9539         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9540         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9541         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9542
9543         if (ioa_cfg->sis64) {
9544                 t->init_feedback_reg = base + p->init_feedback_reg;
9545                 t->dump_addr_reg = base + p->dump_addr_reg;
9546                 t->dump_data_reg = base + p->dump_data_reg;
9547                 t->endian_swap_reg = base + p->endian_swap_reg;
9548         }
9549 }
9550
9551 /**
9552  * ipr_init_ioa_cfg - Initialize IOA config struct
9553  * @ioa_cfg:    ioa config struct
9554  * @host:               scsi host struct
9555  * @pdev:               PCI dev struct
9556  *
9557  * Return value:
9558  *      none
9559  **/
9560 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9561                              struct Scsi_Host *host, struct pci_dev *pdev)
9562 {
9563         int i;
9564
9565         ioa_cfg->host = host;
9566         ioa_cfg->pdev = pdev;
9567         ioa_cfg->log_level = ipr_log_level;
9568         ioa_cfg->doorbell = IPR_DOORBELL;
9569         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9570         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9571         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9572         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9573         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9574         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9575
9576         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9577         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9578         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9579         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9580         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9581         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9582         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9583         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9584         ioa_cfg->sdt_state = INACTIVE;
9585
9586         ipr_initialize_bus_attr(ioa_cfg);
9587         ioa_cfg->max_devs_supported = ipr_max_devs;
9588
9589         if (ioa_cfg->sis64) {
9590                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9591                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9592                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9593                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9594                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9595                                            + ((sizeof(struct ipr_config_table_entry64)
9596                                                * ioa_cfg->max_devs_supported)));
9597         } else {
9598                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9599                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9600                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9601                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9602                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9603                                            + ((sizeof(struct ipr_config_table_entry)
9604                                                * ioa_cfg->max_devs_supported)));
9605         }
9606
9607         host->max_channel = IPR_VSET_BUS;
9608         host->unique_id = host->host_no;
9609         host->max_cmd_len = IPR_MAX_CDB_LEN;
9610         host->can_queue = ioa_cfg->max_cmds;
9611         pci_set_drvdata(pdev, ioa_cfg);
9612
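	/*
	 * HRRQ 0 shares the SCSI host lock; each remaining queue gets its
	 * own lock so completions on different queues can be processed
	 * concurrently.
	 */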
9613         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9614                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9615                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9616                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9617                 if (i == 0)
9618                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9619                 else
9620                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9621         }
9622 }
9623
9624 /**
9625  * ipr_get_chip_info - Find adapter chip information
9626  * @dev_id:             PCI device id struct
9627  *
9628  * Return value:
9629  *      ptr to chip information on success / NULL on failure
9630  **/
9631 static const struct ipr_chip_t *
9632 ipr_get_chip_info(const struct pci_device_id *dev_id)
9633 {
9634         int i;
9635
9636         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9637                 if (ipr_chip[i].vendor == dev_id->vendor &&
9638                     ipr_chip[i].device == dev_id->device)
9639                         return &ipr_chip[i];
9640         return NULL;
9641 }
9642
9643 /**
9644  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9645  *                                              during probe time
9646  * @ioa_cfg:    ioa config struct
9647  *
9648  * Return value:
9649  *      None
9650  **/
9651 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9652 {
9653         struct pci_dev *pdev = ioa_cfg->pdev;
9654
9655         if (pci_channel_offline(pdev)) {
9656                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9657                                    !pci_channel_offline(pdev),
9658                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9659                 pci_restore_state(pdev);
9660         }
9661 }
9662
9663 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9664 {
9665         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9666         int i, vectors;
9667
9668         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9669                 entries[i].entry = i;
9670
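	/*
	 * Request between 1 and ipr_number_of_msix vectors; a negative
	 * return means not even a single MSI-X vector could be allocated.
	 */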
9671         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9672                                         entries, 1, ipr_number_of_msix);
9673         if (vectors < 0) {
9674                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9675                 return vectors;
9676         }
9677
9678         for (i = 0; i < vectors; i++)
9679                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9680         ioa_cfg->nvectors = vectors;
9681
9682         return 0;
9683 }
9684
9685 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9686 {
9687         int i, vectors;
9688
9689         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9690         if (vectors < 0) {
9691                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9692                 return vectors;
9693         }
9694
9695         for (i = 0; i < vectors; i++)
9696                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9697         ioa_cfg->nvectors = vectors;
9698
9699         return 0;
9700 }
9701
9702 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9703 {
9704         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9705
9706         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9707                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9708                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9709                 ioa_cfg->vectors_info[vec_idx].
9710                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9711         }
9712 }
9713
9714 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9715 {
9716         int i, rc;
9717
9718         for (i = 1; i < ioa_cfg->nvectors; i++) {
9719                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9720                         ipr_isr_mhrrq,
9721                         0,
9722                         ioa_cfg->vectors_info[i].desc,
9723                         &ioa_cfg->hrrq[i]);
9724                 if (rc) {
9725                         while (--i >= 0)
9726                                 free_irq(ioa_cfg->vectors_info[i].vec,
9727                                         &ioa_cfg->hrrq[i]);
9728                         return rc;
9729                 }
9730         }
9731         return 0;
9732 }
9733
9734 /**
9735  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9736  * @irq:                interrupt number
9737  * @devp:               pointer to the ioa config struct
9738  * Description: Simply set the msi_received flag to 1 indicating that
9739  * Message Signaled Interrupts are supported.
9740  *
9741  * Return value:
9742  *      IRQ_HANDLED
9743  **/
9744 static irqreturn_t ipr_test_intr(int irq, void *devp)
9745 {
9746         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9747         unsigned long lock_flags = 0;
9748         irqreturn_t rc = IRQ_HANDLED;
9749
9750         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9751         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9752
9753         ioa_cfg->msi_received = 1;
9754         wake_up(&ioa_cfg->msi_wait_q);
9755
9756         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9757         return rc;
9758 }
9759
9760 /**
9761  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9762  * @pdev:               PCI device struct
 * @ioa_cfg:            ioa config struct
9763  * @pdev:               PCI device struct
9764  * Description: The return value from pci_enable_msi_range() cannot always be
9765  * trusted.  This routine sets up and initiates a test interrupt to determine
9766  * whether the interrupt is received via the ipr_test_intr() service routine.
9767  * If the test fails, the driver will fall back to LSI.
9768  *
9769  * Return value:
9770  *      0 on success / non-zero on failure
9771  **/
9772 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9773 {
9774         int rc;
9775         volatile u32 int_reg;
9776         unsigned long lock_flags = 0;
9777
9778         ENTER;
9779
9780         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9781         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9782         ioa_cfg->msi_received = 0;
9783         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9784         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9785         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9786         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9787
9788         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9789                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9790         else
9791                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9792         if (rc) {
9793                 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9794                 return rc;
9795         } else if (ipr_debug)
9796                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9797
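	/* Generate a test interrupt and wait for ipr_test_intr() to see it */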
9798         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9799         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9800         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9801         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9802         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9803
9804         if (!ioa_cfg->msi_received) {
9805                 /* MSI test failed */
9806                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9807                 rc = -EOPNOTSUPP;
9808         } else if (ipr_debug)
9809                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9810
9811         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9812
9813         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9814                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9815         else
9816                 free_irq(pdev->irq, ioa_cfg);
9817
9818         LEAVE;
9819
9820         return rc;
9821 }
9822
9823 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9824  * @pdev:               PCI device struct
9825  * @dev_id:             PCI device id struct
9826  *
9827  * Return value:
9828  *      0 on success / non-zero on failure
9829  **/
9830 static int ipr_probe_ioa(struct pci_dev *pdev,
9831                          const struct pci_device_id *dev_id)
9832 {
9833         struct ipr_ioa_cfg *ioa_cfg;
9834         struct Scsi_Host *host;
9835         unsigned long ipr_regs_pci;
9836         void __iomem *ipr_regs;
9837         int rc = PCIBIOS_SUCCESSFUL;
9838         volatile u32 mask, uproc, interrupts;
9839         unsigned long lock_flags, driver_lock_flags;
9840
9841         ENTER;
9842
9843         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9844         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9845
9846         if (!host) {
9847                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9848                 rc = -ENOMEM;
9849                 goto out;
9850         }
9851
9852         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9853         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9854         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9855
9856         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9857
9858         if (!ioa_cfg->ipr_chip) {
9859                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9860                         dev_id->vendor, dev_id->device);
                     rc = -EINVAL;
9861                 goto out_scsi_host_put;
9862         }
9863
9864         /* set SIS 32 or SIS 64 */
9865         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9866         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9867         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9868         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9869
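        /*
         * Operational timeout precedence: the ipr_transop_timeout module
         * parameter wins if set, then the per-device long-timeout quirk
         * (IPR_USE_LONG_TRANSOP_TIMEOUT), and finally the default.
         */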
9870         if (ipr_transop_timeout)
9871                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9872         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9873                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9874         else
9875                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9876
9877         ioa_cfg->revid = pdev->revision;
9878
9879         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9880
9881         ipr_regs_pci = pci_resource_start(pdev, 0);
9882
9883         rc = pci_request_regions(pdev, IPR_NAME);
9884         if (rc < 0) {
9885                 dev_err(&pdev->dev,
9886                         "Couldn't register memory range of registers\n");
9887                 goto out_scsi_host_put;
9888         }
9889
9890         rc = pci_enable_device(pdev);
9891
9892         if (rc || pci_channel_offline(pdev)) {
9893                 if (pci_channel_offline(pdev)) {
9894                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9895                         rc = pci_enable_device(pdev);
9896                 }
9897
9898                 if (rc) {
9899                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9900                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9901                         goto out_release_regions;
9902                 }
9903         }
9904
9905         ipr_regs = pci_ioremap_bar(pdev, 0);
9906
9907         if (!ipr_regs) {
9908                 dev_err(&pdev->dev,
9909                         "Couldn't map memory range of registers\n");
9910                 rc = -ENOMEM;
9911                 goto out_disable;
9912         }
9913
9914         ioa_cfg->hdw_dma_regs = ipr_regs;
9915         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9916         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9917
9918         ipr_init_regs(ioa_cfg);
9919
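        /*
         * SIS-64 adapters are 64-bit DMA capable; try the 64-bit mask
         * first and fall back to 32-bit if the platform rejects it.
         * SIS-32 adapters are limited to 32-bit DMA addressing.
         */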
9920         if (ioa_cfg->sis64) {
9921                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9922                 if (rc < 0) {
9923                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9924                         rc = dma_set_mask_and_coherent(&pdev->dev,
9925                                                        DMA_BIT_MASK(32));
9926                 }
9927         } else
9928                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9929
9930         if (rc < 0) {
9931                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9932                 goto cleanup_nomem;
9933         }
9934
9935         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9936                                    ioa_cfg->chip_cfg->cache_line_size);
9937
9938         if (rc != PCIBIOS_SUCCESSFUL) {
9939                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9940                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9941                 rc = -EIO;
9942                 goto cleanup_nomem;
9943         }
9944
9945         /* Issue MMIO read to ensure card is not in EEH */
9946         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9947         ipr_wait_for_pci_err_recovery(ioa_cfg);
9948
9949         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9950                 dev_err(&pdev->dev, "Capping number of MSI-X vectors at %d\n",
9951                         IPR_MAX_MSIX_VECTORS);
9952                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9953         }
9954
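        /*
         * Interrupt mode selection, in order of preference: for chips that
         * support message interrupts, try MSI-X first, then plain MSI;
         * fall back to legacy line interrupts (LSI) with a single vector
         * if neither can be enabled.
         */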
9955         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9956                         ipr_enable_msix(ioa_cfg) == 0)
9957                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9958         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9959                         ipr_enable_msi(ioa_cfg) == 0)
9960                 ioa_cfg->intr_flag = IPR_USE_MSI;
9961         else {
9962                 ioa_cfg->intr_flag = IPR_USE_LSI;
9963                 ioa_cfg->nvectors = 1;
9964                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9965         }
9966
9967         pci_set_master(pdev);
9968
9969         if (pci_channel_offline(pdev)) {
9970                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9971                 pci_set_master(pdev);
9972                 if (pci_channel_offline(pdev)) {
9973                         rc = -EIO;
9974                         goto out_msi_disable;
9975                 }
9976         }
9977
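        /*
         * Even when MSI/MSI-X enablement reports success, delivery is
         * verified with a real test interrupt (see ipr_test_msi()).  On
         * -EOPNOTSUPP the vectors are torn down and the driver drops back
         * to LSI with a single vector.
         */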
9978         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9979             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9980                 rc = ipr_test_msi(ioa_cfg, pdev);
9981                 if (rc == -EOPNOTSUPP) {
9982                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9983                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9984                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9985                                 pci_disable_msi(pdev);
9986                         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9987                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9988                                 pci_disable_msix(pdev);
9989                         }
9990
9991                         ioa_cfg->intr_flag = IPR_USE_LSI;
9992                         ioa_cfg->nvectors = 1;
9993                 } else if (rc) {
9995                         goto out_msi_disable;
9996                 } else {
9997                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9998                                 dev_info(&pdev->dev,
9999                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
10000                                         ioa_cfg->nvectors, pdev->irq);
10001                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10002                                 dev_info(&pdev->dev,
10003                                         "Request for %d MSIXs succeeded.\n",
10004                                         ioa_cfg->nvectors);
10005                 }
10006         }
10007
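        /*
         * Use one host RRQ per interrupt vector, but never more than the
         * number of online CPUs or the IPR_MAX_HRRQ_NUM hardware limit.
         */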
10008         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10009                                 (unsigned int)num_online_cpus(),
10010                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10011
10012         rc = ipr_save_pcix_cmd_reg(ioa_cfg);
              if (rc)
10013                 goto out_msi_disable;
10014
10015         rc = ipr_set_pcix_cmd_reg(ioa_cfg);
              if (rc)
10016                 goto out_msi_disable;
10017
10018         rc = ipr_alloc_mem(ioa_cfg);
10019         if (rc < 0) {
10020                 dev_err(&pdev->dev,
10021                         "Couldn't allocate enough memory for device driver!\n");
10022                 goto out_msi_disable;
10023         }
10024
10025         /* Save away PCI config space for use following IOA reset */
10026         rc = pci_save_state(pdev);
10027
10028         if (rc != PCIBIOS_SUCCESSFUL) {
10029                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10030                 rc = -EIO;
10031                 goto cleanup_nolog;
10032         }
10033
10034         /*
10035          * If HRRQ updated interrupt is not masked, or reset alert is set,
10036          * the card is in an unknown state and needs a hard reset
10037          */
10038         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10039         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10040         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10041         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10042                 ioa_cfg->needs_hard_reset = 1;
10043         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10044                 ioa_cfg->needs_hard_reset = 1;
10045         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10046                 ioa_cfg->ioa_unit_checked = 1;
10047
10048         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10049         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10050         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10051
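        /*
         * MSI/MSI-X: register a handler per vector using the names built
         * by name_msi_vectors(); vector 0 is requested here and the rest
         * via ipr_request_other_msi_irqs().  LSI: a single IRQF_SHARED
         * handler on the legacy interrupt line.
         */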
10052         if (ioa_cfg->intr_flag == IPR_USE_MSI
10053                         || ioa_cfg->intr_flag == IPR_USE_MSIX) {
10054                 name_msi_vectors(ioa_cfg);
10055                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
10056                         0,
10057                         ioa_cfg->vectors_info[0].desc,
10058                         &ioa_cfg->hrrq[0]);
10059                 if (!rc)
10060                         rc = ipr_request_other_msi_irqs(ioa_cfg);
10061         } else {
10062                 rc = request_irq(pdev->irq, ipr_isr,
10063                          IRQF_SHARED,
10064                          IPR_NAME, &ioa_cfg->hrrq[0]);
10065         }
10066         if (rc) {
10067                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10068                         pdev->irq, rc);
10069                 goto cleanup_nolog;
10070         }
10071
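        /*
         * Adapters flagged IPR_USE_PCI_WARM_RESET (and revision 0
         * Obsidian-E cards) are reset via a PCI slot reset driven from a
         * dedicated ordered workqueue; WQ_MEM_RECLAIM guarantees the reset
         * can make forward progress even under memory pressure.
         */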
10072         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10073             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10074                 ioa_cfg->needs_warm_reset = 1;
10075                 ioa_cfg->reset = ipr_reset_slot_reset;
10076
10077                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10078                                                                 WQ_MEM_RECLAIM, host->host_no);
10079
10080                 if (!ioa_cfg->reset_work_q) {
10081                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
                              rc = -ENOMEM;
10082                         goto out_free_irq;
10083                 }
10084         } else
10085                 ioa_cfg->reset = ipr_reset_start_bist;
10086
10087         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10088         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10089         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10090
10091         LEAVE;
10092 out:
10093         return rc;
10094
10095 out_free_irq:
10096         ipr_free_irqs(ioa_cfg);
10097 cleanup_nolog:
10098         ipr_free_mem(ioa_cfg);
10099 out_msi_disable:
10100         ipr_wait_for_pci_err_recovery(ioa_cfg);
10101         if (ioa_cfg->intr_flag == IPR_USE_MSI)
10102                 pci_disable_msi(pdev);
10103         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10104                 pci_disable_msix(pdev);
10105 cleanup_nomem:
10106         iounmap(ipr_regs);
10107 out_disable:
10108         pci_disable_device(pdev);
10109 out_release_regions:
10110         pci_release_regions(pdev);
10111 out_scsi_host_put:
10112         scsi_host_put(host);
10113         goto out;
10114 }
10115
10116 /**
10117  * ipr_initiate_ioa_bringdown - Bring down an adapter
10118  * @ioa_cfg:            ioa config struct
10119  * @shutdown_type:      shutdown type
10120  *
10121  * Description: This function will initiate bringing down the adapter.
10122  * This consists of issuing an IOA shutdown to the adapter
10123  * to flush the cache, and running BIST.
10124  * If the caller needs to wait on the completion of the reset,
10125  * the caller must sleep on the reset_wait_q.
10126  *
10127  * Return value:
10128  *      none
10129  **/
10130 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10131                                        enum ipr_shutdown_type shutdown_type)
10132 {
10133         ENTER;
10134         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10135                 ioa_cfg->sdt_state = ABORT_DUMP;
10136         ioa_cfg->reset_retries = 0;
10137         ioa_cfg->in_ioa_bringdown = 1;
10138         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10139         LEAVE;
10140 }
10141
10142 /**
10143  * __ipr_remove - Remove a single adapter
10144  * @pdev:       pci device struct
10145  *
10146  * Adapter hot plug remove entry point.
10147  *
10148  * Return value:
10149  *      none
10150  **/
10151 static void __ipr_remove(struct pci_dev *pdev)
10152 {
10153         unsigned long host_lock_flags = 0;
10154         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10155         int i;
10156         unsigned long driver_lock_flags;
10157         ENTER;
10158
10159         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10160         while (ioa_cfg->in_reset_reload) {
10161                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10162                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10163                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10164         }
10165
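        /*
         * Mark every HRRQ as being removed, each under its own lock, so
         * that new commands are no longer accepted; the wmb() below
         * publishes the flags before the bringdown is initiated.
         */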
10166         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10167                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10168                 ioa_cfg->hrrq[i].removing_ioa = 1;
10169                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10170         }
10171         wmb();
10172         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10173
10174         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10175         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10176         flush_work(&ioa_cfg->work_q);
10177         if (ioa_cfg->reset_work_q)
10178                 flush_workqueue(ioa_cfg->reset_work_q);
10179         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10180         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10181
10182         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10183         list_del(&ioa_cfg->queue);
10184         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10185
10186         if (ioa_cfg->sdt_state == ABORT_DUMP)
10187                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10188         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10189
10190         ipr_free_all_resources(ioa_cfg);
10191
10192         LEAVE;
10193 }
10194
10195 /**
10196  * ipr_remove - IOA hot plug remove entry point
10197  * @pdev:       pci device struct
10198  *
10199  * Adapter hot plug remove entry point.
10200  *
10201  * Return value:
10202  *      none
10203  **/
10204 static void ipr_remove(struct pci_dev *pdev)
10205 {
10206         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10207
10208         ENTER;
10209
10210         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10211                               &ipr_trace_attr);
10212         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10213                              &ipr_dump_attr);
10214         scsi_remove_host(ioa_cfg->host);
10215
10216         __ipr_remove(pdev);
10217
10218         LEAVE;
10219 }
10220
10221 /**
10222  * ipr_probe - Adapter hot plug add entry point
       * @pdev:       pci device struct
       * @dev_id:     pci device id struct
10223  *
10224  * Return value:
10225  *      0 on success / non-zero on failure
10226  **/
10227 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10228 {
10229         struct ipr_ioa_cfg *ioa_cfg;
10230         int rc, i;
10231
10232         rc = ipr_probe_ioa(pdev, dev_id);
10233
10234         if (rc)
10235                 return rc;
10236
10237         ioa_cfg = pci_get_drvdata(pdev);
10238         rc = ipr_probe_ioa_part2(ioa_cfg);
10239
10240         if (rc) {
10241                 __ipr_remove(pdev);
10242                 return rc;
10243         }
10244
10245         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10246
10247         if (rc) {
10248                 __ipr_remove(pdev);
10249                 return rc;
10250         }
10251
10252         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10253                                    &ipr_trace_attr);
10254
10255         if (rc) {
10256                 scsi_remove_host(ioa_cfg->host);
10257                 __ipr_remove(pdev);
10258                 return rc;
10259         }
10260
10261         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10262                                    &ipr_dump_attr);
10263
10264         if (rc) {
10265                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10266                                       &ipr_trace_attr);
10267                 scsi_remove_host(ioa_cfg->host);
10268                 __ipr_remove(pdev);
10269                 return rc;
10270         }
10271
10272         scsi_scan_host(ioa_cfg->host);
10273         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10274
10275         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10276                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10277                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10278                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10279                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10280                 }
10281         }
10282
10283         schedule_work(&ioa_cfg->work_q);
10284         return 0;
10285 }
10286
10287 /**
10288  * ipr_shutdown - Shutdown handler.
10289  * @pdev:       pci device struct
10290  *
10291  * This function is invoked upon system shutdown/reboot. It will issue
10292  * an adapter shutdown to the adapter to flush the write cache.
10293  *
10294  * Return value:
10295  *      none
10296  **/
10297 static void ipr_shutdown(struct pci_dev *pdev)
10298 {
10299         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10300         unsigned long lock_flags = 0;
10301         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10302         int i;
10303
10304         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10305         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10306                 ioa_cfg->iopoll_weight = 0;
10307                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10308                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10309         }
10310
10311         while (ioa_cfg->in_reset_reload) {
10312                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10313                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10314                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10315         }
10316
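        /*
         * With ipr_fast_reboot enabled, SIS-64 adapters take the quicker
         * QUIESCE shutdown on restart; their IRQs and PCI device are then
         * released below since the system is going down anyway.
         */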
10317         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10318                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10319
10320         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10321         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10322         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10323         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10324                 ipr_free_irqs(ioa_cfg);
10325                 pci_disable_device(ioa_cfg->pdev);
10326         }
10327 }
10328
10329 static const struct pci_device_id ipr_pci_table[] = {
10330         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10331                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10332         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10333                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10334         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10335                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10336         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10337                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10338         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10339                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10340         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10341                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10342         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10343                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10344         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10345                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10346                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10347         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10348               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10349         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10350               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10351               IPR_USE_LONG_TRANSOP_TIMEOUT },
10352         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10353               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10354               IPR_USE_LONG_TRANSOP_TIMEOUT },
10355         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10356               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10357         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10358               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10359               IPR_USE_LONG_TRANSOP_TIMEOUT},
10360         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10361               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10362               IPR_USE_LONG_TRANSOP_TIMEOUT },
10363         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10364               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10365               IPR_USE_LONG_TRANSOP_TIMEOUT },
10366         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10367               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10368         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10369               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10370         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10371               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10372               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10373         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10374                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10375         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10376                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10377         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10378                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10379                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10380         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10381                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10382                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10383         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10384                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10385         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10386                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10387         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10388                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10389         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10390                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10391         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10392                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10393         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10394                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10395         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10396                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10397         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10398                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10399         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10400                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10401         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10402                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10403         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10404                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10405         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10406                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10407         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10408                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10409         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10410                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10411         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10412                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10413         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10414                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10415         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10416                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10417         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10418                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10419         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10420                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10421         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10422                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10423         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10424                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10425         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10426                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10427         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10428                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10429         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10430                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10431         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10432                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10433         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10434                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10435         { }
10436 };
10437 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10438
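/*
 * PCI error recovery (EEH/AER) hooks: error_detected quiesces the adapter
 * when the PCI channel fails, mmio_enabled is called once MMIO access is
 * restored, and slot_reset reinitializes the adapter after the slot has
 * been reset.
 */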
10439 static const struct pci_error_handlers ipr_err_handler = {
10440         .error_detected = ipr_pci_error_detected,
10441         .mmio_enabled = ipr_pci_mmio_enabled,
10442         .slot_reset = ipr_pci_slot_reset,
10443 };
10444
10445 static struct pci_driver ipr_driver = {
10446         .name = IPR_NAME,
10447         .id_table = ipr_pci_table,
10448         .probe = ipr_probe,
10449         .remove = ipr_remove,
10450         .shutdown = ipr_shutdown,
10451         .err_handler = &ipr_err_handler,
10452 };
10453
10454 /**
10455  * ipr_halt_done - Shutdown prepare completion
       * @ipr_cmd:    ipr command struct
10456  *
10457  * Return value:
10458  *      none
10459  **/
10460 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10461 {
10462         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10463 }
10464
10465 /**
10466  * ipr_halt - Issue shutdown prepare to all adapters
       * @nb:         Notifier block
       * @event:      Notifier event
       * @buf:        Notifier data (unused)
10467  *
10468  * Return value:
10469  *      NOTIFY_OK if the event was handled / NOTIFY_DONE otherwise
10470  **/
10471 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10472 {
10473         struct ipr_cmnd *ipr_cmd;
10474         struct ipr_ioa_cfg *ioa_cfg;
10475         unsigned long flags = 0, driver_lock_flags;
10476
10477         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10478                 return NOTIFY_DONE;
10479
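        /*
         * Send an IOA SHUTDOWN (prepare for normal) command to every
         * adapter that is still accepting commands; adapters already being
         * quiesced for a fast reboot are skipped.
         */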
10480         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10481
10482         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10483                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10484                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10485                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10486                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10487                         continue;
10488                 }
10489
10490                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10491                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10492                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10493                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10494                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10495
10496                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10497                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10498         }
10499         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10500
10501         return NOTIFY_OK;
10502 }
10503
10504 static struct notifier_block ipr_notifier = {
10505         .notifier_call = ipr_halt,
10506 };
10507
10508 /**
10509  * ipr_init - Module entry point
10510  *
10511  * Return value:
10512  *      0 on success / negative value on failure
10513  **/
10514 static int __init ipr_init(void)
10515 {
10516         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10517                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10518
10519         register_reboot_notifier(&ipr_notifier);
10520         return pci_register_driver(&ipr_driver);
10521 }
10522
10523 /**
10524  * ipr_exit - Module unload
10525  *
10526  * Module unload entry point.
10527  *
10528  * Return value:
10529  *      none
10530  **/
10531 static void __exit ipr_exit(void)
10532 {
10533         unregister_reboot_notifier(&ipr_notifier);
10534         pci_unregister_driver(&ipr_driver);
10535 }
10536
10537 module_init(ipr_init);
10538 module_exit(ipr_exit);