staging: unisys: visorhba change -1 return value
[cascardo/linux.git] / drivers / staging / unisys / visorhba / visorhba_main.c
1 /* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2  * All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or (at
7  * your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12  * NON INFRINGEMENT.  See the GNU General Public License for more
13  * details.
14  */
15
16 #include <linux/debugfs.h>
17 #include <linux/skbuff.h>
18 #include <linux/kthread.h>
19 #include <linux/idr.h>
20 #include <linux/seq_file.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_device.h>
25
26 #include "visorbus.h"
27 #include "iochannel.h"
28
29 /* The Send and Receive Buffers of the IO Queue may both be full */
30
31 #define IOS_ERROR_THRESHOLD     1000
32 /* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
33  *         = 4800 bytes ~ 2^13 = 8192 bytes
34  */
35 #define MAX_BUF                 8192
36 #define MAX_PENDING_REQUESTS    (MIN_NUMSIGNALS * 2)
37 #define VISORHBA_ERROR_COUNT    30
38
39 static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
40                                       void (*visorhba_cmnd_done)
41                                             (struct scsi_cmnd *));
42 #ifdef DEF_SCSI_QCMD
43 static DEF_SCSI_QCMD(visorhba_queue_command)
44 #else
45 #define visorhba_queue_command visorhba_queue_command_lck
46 #endif
47 static int visorhba_probe(struct visor_device *dev);
48 static void visorhba_remove(struct visor_device *dev);
49 static int visorhba_pause(struct visor_device *dev,
50                           visorbus_state_complete_func complete_func);
51 static int visorhba_resume(struct visor_device *dev,
52                            visorbus_state_complete_func complete_func);
53
54 static struct dentry *visorhba_debugfs_dir;
55
/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the SPAR_VHBA channel.
	 */
	{ SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
	/* NULL-UUID entry terminates the table for the bus driver's scan */
	{ NULL_UUID_LE, NULL }
};
64
/* This is used to tell the visor bus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* no per-device interrupt handler; responses are polled elsewhere */
	.channel_interrupt = NULL,
};
79 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
80 MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
81
/* Per-disk bookkeeping node, one per channel/id/lun triple, kept on a
 * singly-linked list anchored at visorhba_devdata.head.
 */
struct visordisk_info {
	u32 valid;
	u32 channel, id, lun;   /* Disk Path */
	/* set to IOS_ERROR_THRESHOLD once error_count saturates */
	atomic_t ios_threshold;
	/* error tally; capped at VISORHBA_ERROR_COUNT by the handlers */
	atomic_t error_count;
	struct visordisk_info *next;	/* NULL terminates the list */
};
89
/* One slot of the pending-request table: tracks a single command that has
 * been forwarded to the IOVM and has not yet completed.
 */
struct scsipending {
	struct uiscmdrsp cmdrsp;
	void *sent;             /* The Data being tracked */
	char cmdtype;           /* Type of pointer that is being stored */
};
95
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	spinlock_t privlock; /* lock to protect data in devdata */
	bool serverdown;		/* IOVM is down; fail new commands */
	bool serverchangingstate;	/* serverdown teardown in progress */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	/* embedded anchor of the visordisk_info list (not a real disk) */
	struct visordisk_info head;
	/* largest scsi buffer length seen so far (debugfs statistic) */
	unsigned int max_buff_len;
	int devnum;
	struct task_struct *thread;
	int thread_wait_ms;

	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
132
/* Thin wrapper around a devdata pointer.
 * NOTE(review): no user of this struct is visible in this file -- confirm
 * it is still needed.
 */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
136
/* Iterate @list->head's vdisk chain and run the following statement/block
 * for each node whose channel/id/lun equal @match's.  The trailing `if`
 * means the caller's body attaches to it, so a caller must never follow
 * this macro with a bare `else`.
 * NOTE(review): iteration starts at the embedded anchor node itself and
 * the loop condition tests iter->next, so the final node's fields are
 * checked only if it is not the tail -- confirm this asymmetry is intended.
 */
#define for_each_vdisk_match(iter, list, match)                   \
	for (iter = &list->head; iter->next; iter = iter->next) \
		if ((iter->channel == match->channel) &&                  \
		    (iter->id == match->id) &&                    \
		    (iter->lun == match->lun))
142 /**
143  *      visor_thread_start - starts a thread for the device
144  *      @threadfn: Function the thread starts
145  *      @thrcontext: Context to pass to the thread, i.e. devdata
146  *      @name: string describing name of thread
147  *
148  *      Starts a thread for the device.
149  *
150  *      Return the task_struct * denoting the thread on success,
151  *             or NULL on failure
152  */
153 static struct task_struct *visor_thread_start
154 (int (*threadfn)(void *), void *thrcontext, char *name)
155 {
156         struct task_struct *task;
157
158         task = kthread_run(threadfn, thrcontext, "%s", name);
159         if (IS_ERR(task)) {
160                 pr_err("visorbus failed to start thread\n");
161                 return NULL;
162         }
163         return task;
164 }
165
/**
 *	visor_thread_stop - stops the thread if it is running
 *	@task: thread previously returned by visor_thread_start(), or NULL
 *
 *	A NULL @task (no thread ever started) is silently ignored.
 */
static void visor_thread_stop(struct task_struct *task)
{
	if (task)
		kthread_stop(task);
}
175
176 /**
177  *      add_scsipending_entry - save off io command that is pending in
178  *                              Service Partition
179  *      @devdata: Pointer to devdata
180  *      @cmdtype: Specifies the type of command pending
181  *      @new:   The command to be saved
182  *
183  *      Saves off the io command that is being handled by the Service
184  *      Partition so that it can be handled when it completes. If new is
185  *      NULL it is assumed the entry refers only to the cmdrsp.
186  *      Returns insert_location where entry was added,
187  *      -EBUSY if it can't
188  */
189 static int add_scsipending_entry(struct visorhba_devdata *devdata,
190                                  char cmdtype, void *new)
191 {
192         unsigned long flags;
193         struct scsipending *entry;
194         int insert_location;
195
196         spin_lock_irqsave(&devdata->privlock, flags);
197         insert_location = devdata->nextinsert;
198         while (devdata->pending[insert_location].sent) {
199                 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
200                 if (insert_location == (int)devdata->nextinsert) {
201                         spin_unlock_irqrestore(&devdata->privlock, flags);
202                         return -EBUSY;
203                 }
204         }
205
206         entry = &devdata->pending[insert_location];
207         memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
208         entry->cmdtype = cmdtype;
209         if (new)
210                 entry->sent = new;
211         else /* wants to send cmdrsp */
212                 entry->sent = &entry->cmdrsp;
213         devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
214         spin_unlock_irqrestore(&devdata->privlock, flags);
215
216         return insert_location;
217 }
218
219 /**
220  *      del_scsipending_enty - removes an entry from the pending array
221  *      @devdata: Device holding the pending array
222  *      @del: Entry to remove
223  *
224  *      Removes the entry pointed at by del and returns it.
225  *      Returns the scsipending entry pointed at
226  */
227 static void *del_scsipending_ent(struct visorhba_devdata *devdata,
228                                  int del)
229 {
230         unsigned long flags;
231         void *sent;
232
233         if (del >= MAX_PENDING_REQUESTS)
234                 return NULL;
235
236         spin_lock_irqsave(&devdata->privlock, flags);
237         sent = devdata->pending[del].sent;
238
239         devdata->pending[del].cmdtype = 0;
240         devdata->pending[del].sent = NULL;
241         spin_unlock_irqrestore(&devdata->privlock, flags);
242
243         return sent;
244 }
245
246 /**
247  *      get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
248  *      #ddata: Device holding the pending array
249  *      @ent: Entry that stores the cmdrsp
250  *
251  *      Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
252  *      if the "sent" field is not NULL
253  *      Returns a pointer to the cmdrsp.
254  */
255 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
256                                                 int ent)
257 {
258         if (ddata->pending[ent].sent)
259                 return &ddata->pending[ent].cmdrsp;
260
261         return NULL;
262 }
263
264 /**
265  *      simple_idr_get - associate a provided pointer with an int value
266  *                       1 <= value <= INT_MAX, and return this int value;
267  *                       the pointer value can be obtained later by passing
268  *                       this int value to idr_find()
269  *      @idrtable: the data object maintaining the pointer<-->int mappings
270  *      @p: the pointer value to be remembered
271  *      @lock: a spinlock used when exclusive access to idrtable is needed
272  */
273 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
274                                    spinlock_t *lock)
275 {
276         int id;
277         unsigned long flags;
278
279         idr_preload(GFP_KERNEL);
280         spin_lock_irqsave(lock, flags);
281         id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
282         spin_unlock_irqrestore(lock, flags);
283         idr_preload_end();
284         if (id < 0)
285                 return 0;  /* failure */
286         return (unsigned int)(id);  /* idr_alloc() guarantees > 0 */
287 }
288
/**
 *	setup_scsitaskmgmt_handles - stash the necessary handles so that the
 *				     completion processing logic for a taskmgmt
 *				     cmd will be able to find who to wake up
 *				     and where to stash the result
 *	@idrtable: idr mapping int handles to pointers
 *	@lock: spinlock protecting @idrtable
 *	@cmdrsp: command packet whose handle fields are filled in
 *	@event: waitqueue the completer should wake
 *	@result: int the completer should store the taskmgmt result into
 *
 *	A handle of 0 (simple_idr_get failure) means "no mapping"; the
 *	completion path treats a failed idr_find the same way.
 */
static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
				       struct uiscmdrsp *cmdrsp,
				       wait_queue_head_t *event, int *result)
{
	/* specify the event that has to be triggered when this */
	/* cmd is complete */
	cmdrsp->scsitaskmgmt.notify_handle =
		simple_idr_get(idrtable, event, lock);
	cmdrsp->scsitaskmgmt.notifyresult_handle =
		simple_idr_get(idrtable, result, lock);
}
306
/**
 *	cleanup_scsitaskmgmt_handles - forget handles created by
 *				       setup_scsitaskmgmt_handles()
 *	@idrtable: idr the handles were allocated from
 *	@cmdrsp: command packet holding the handles to release
 *
 *	A zero handle means the corresponding simple_idr_get() failed, so
 *	there is nothing to remove for it.
 */
static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
					 struct uiscmdrsp *cmdrsp)
{
	if (cmdrsp->scsitaskmgmt.notify_handle)
		idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
	if (cmdrsp->scsitaskmgmt.notifyresult_handle)
		idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
}
319
/**
 *	forward_taskmgmt_command - send taskmgmt command to the Service
 *				   Partition
 *	@tasktype: Type of taskmgmt command
 *	@scsicmd: scsi command that triggered the taskmgmt request
 *
 *	Create a cmdrsp packet and send it to the Service Partition
 *	that will service this request, then block until the result
 *	arrives (or times out).
 *	Returns SUCCESS if the command completed, FAILED otherwise.
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_cmnd *scsicmd)
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	int notifyresult = 0xffff;	/* sentinel: "no result yet" */
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	/* IOVM is down or being torn down; nothing can be forwarded */
	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* reserve a pending slot; NULL means track the slot's own cmdrsp */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* publish the ON-STACK waitqueue and result int via idr handles so
	 * the completion path can find them.
	 * NOTE(review): if we time out below, a late response could still
	 * look these handles up; they are removed on the error path, but
	 * confirm there is no window where the completer dereferences the
	 * dead stack frame.
	 */
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART,
				       cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		 tasktype, notifyresult);
	if (tasktype == TASK_MGMT_ABORT_TASK)
		scsicmd->result = DID_ABORT << 16;
	else
		scsicmd->result = DID_RESET << 16;

	scsicmd->scsi_done(scsicmd);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
398
399 /**
400  *      visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
401  *      @scsicmd: The scsicmd that needs aborted
402  *
403  *      Returns SUCCESS if inserted, failure otherwise
404  *
405  */
406 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
407 {
408         /* issue TASK_MGMT_ABORT_TASK */
409         struct scsi_device *scsidev;
410         struct visordisk_info *vdisk;
411         struct visorhba_devdata *devdata;
412
413         scsidev = scsicmd->device;
414         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
415         for_each_vdisk_match(vdisk, devdata, scsidev) {
416                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
417                         atomic_inc(&vdisk->error_count);
418                 else
419                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
420         }
421         return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
422 }
423
424 /**
425  *      visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
426  *      @scsicmd: The scsicmd that needs aborted
427  *
428  *      Returns SUCCESS if inserted, failure otherwise
429  */
430 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
431 {
432         /* issue TASK_MGMT_LUN_RESET */
433         struct scsi_device *scsidev;
434         struct visordisk_info *vdisk;
435         struct visorhba_devdata *devdata;
436
437         scsidev = scsicmd->device;
438         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
439         for_each_vdisk_match(vdisk, devdata, scsidev) {
440                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
441                         atomic_inc(&vdisk->error_count);
442                 else
443                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
444         }
445         return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
446 }
447
448 /**
449  *      visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
450  *                                   target on the bus
451  *      @scsicmd: The scsicmd that needs aborted
452  *
453  *      Returns SUCCESS
454  */
455 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
456 {
457         struct scsi_device *scsidev;
458         struct visordisk_info *vdisk;
459         struct visorhba_devdata *devdata;
460
461         scsidev = scsicmd->device;
462         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
463         for_each_vdisk_match(vdisk, devdata, scsidev) {
464                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
465                         atomic_inc(&vdisk->error_count);
466                 else
467                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
468         }
469         return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
470 }
471
/**
 *	visorhba_host_reset_handler - Not supported
 *	@scsicmd: The scsicmd that needs aborted
 *
 *	Host reset is not implemented; the midlayer is told SUCCESS so
 *	error handling can proceed.
 *	Returns SUCCESS
 */
static int
visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	return SUCCESS;
}
485
/**
 *	visorhba_get_info
 *	@shp: Scsi host that is requesting information (unused)
 *
 *	Returns a static identification string for the host template's
 *	.info callback.
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
497
/**
 *	visorhba_queue_command_lck -- queues command to the Service Partition
 *	@scsicmd: Command to be queued
 *	@visorhba_cmnd_done: Done command to call when scsicmd is returned
 *
 *	Queues to scsicmd to the ServicePartition after converting it to a
 *	uiscmdrsp structure.
 *
 *	Returns 0 if queued to the Service Partition, otherwise
 *	SCSI_MLQUEUE_DEVICE_BUSY so the midlayer retries later.
 */
static int
visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
			   void (*visorhba_cmnd_done)(struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* IOVM down or changing state: ask the midlayer to retry later */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);

	/* pending table full: also a retryable busy condition */
	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);

	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);

	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* more scatter-gather segments than the channel packet can carry */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART,
				       cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	/* release the reserved slot so the command is not stuck pending */
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
582
/**
 *	visorhba_slave_alloc - called when new disk is discovered
 *	@scsidev: New disk
 *
 *	Create a new visordisk_info structure and add it to our
 *	list of vdisks.
 *
 *	Returns success when created, otherwise error.
 */
static int visorhba_slave_alloc(struct scsi_device *scsidev)
{
	/* this is called by the midlayer before scan for new devices --
	 * LLD can alloc any struct & do init if needed.
	 */
	struct visordisk_info *vdisk;
	struct visordisk_info *tmpvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	if (!devdata)
		return 0; /* even though we errored, treat as success */

	for_each_vdisk_match(vdisk, devdata, scsidev)
		return 0; /* already allocated return success */

	tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
	if (!tmpvdisk)
		return -ENOMEM;

	tmpvdisk->channel = scsidev->channel;
	tmpvdisk->id = scsidev->id;
	tmpvdisk->lun = scsidev->lun;
	/* the macro's loop left vdisk at the list tail (next == NULL),
	 * so this appends the new node.
	 * NOTE(review): the append is not protected by devdata->privlock --
	 * confirm concurrent slave_alloc/slave_destroy cannot race here.
	 */
	vdisk->next = tmpvdisk;
	return 0;
}
619
620 /**
621  *      visorhba_slave_destroy - disk is going away
622  *      @scsidev: scsi device going away
623  *
624  *      Disk is going away, clean up resources.
625  *      Returns void.
626  */
627 static void visorhba_slave_destroy(struct scsi_device *scsidev)
628 {
629         /* midlevel calls this after device has been quiesced and
630          * before it is to be deleted.
631          */
632         struct visordisk_info *vdisk, *delvdisk;
633         struct visorhba_devdata *devdata;
634         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
635
636         devdata = (struct visorhba_devdata *)scsihost->hostdata;
637         for_each_vdisk_match(vdisk, devdata, scsidev) {
638                 delvdisk = vdisk->next;
639                 vdisk->next = delvdisk->next;
640                 kfree(delvdisk);
641                 return;
642         }
643 }
644
/* SCSI midlayer host template: wires the queuecommand and error-handler
 * entry points above into the scsi core.
 */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
/* max outstanding commands the midlayer may queue to this host */
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
662
663 /**
664  *      info_debugfs_show - debugfs interface to dump visorhba states
665  *
666  *      This presents a file in the debugfs tree named:
667  *          /visorhba/vbus<x>:dev<y>/info
668  */
669 static int info_debugfs_show(struct seq_file *seq, void *v)
670 {
671         struct visorhba_devdata *devdata = seq->private;
672
673         seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
674         seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
675         seq_printf(seq, "interrupts_disabled = %llu\n",
676                    devdata->interrupts_disabled);
677         seq_printf(seq, "interrupts_notme = %llu\n",
678                    devdata->interrupts_notme);
679         seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
680         if (devdata->flags_addr) {
681                 u64 phys_flags_addr =
682                         virt_to_phys((__force  void *)devdata->flags_addr);
683                 seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
684                            phys_flags_addr);
685                 seq_printf(seq, "FeatureFlags = %llu\n",
686                            (__le64)readq(devdata->flags_addr));
687         }
688         seq_printf(seq, "acquire_failed_cnt = %llu\n",
689                    devdata->acquire_failed_cnt);
690
691         return 0;
692 }
693
/* debugfs open: route reads through info_debugfs_show via single_open;
 * inode->i_private carries the devdata pointer
 */
static int info_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, info_debugfs_show, inode->i_private);
}
698
/* file_operations for the debugfs "info" file (read-only seq_file) */
static const struct file_operations info_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = info_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
706
707 /**
708  *      complete_taskmgmt_command - complete task management
709  *      @cmdrsp: Response from the IOVM
710  *
711  *      Service Partition returned the result of the task management
712  *      command. Wake up anyone waiting for it.
713  *      Returns void
714  */
715 static inline void complete_taskmgmt_command
716 (struct idr *idrtable, struct uiscmdrsp *cmdrsp, int result)
717 {
718         wait_queue_head_t *wq =
719                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
720         int *scsi_result_ptr =
721                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
722
723         if (unlikely(!(wq && scsi_result_ptr))) {
724                 pr_err("visorhba: no completion context; cmd will time out\n");
725                 return;
726         }
727
728         /* copy the result of the taskmgmt and
729          * wake up the error handler that is waiting for this
730          */
731         pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
732         *scsi_result_ptr = result;
733         wake_up_all(wq);
734 }
735
/**
 *	visorhba_serverdown_complete - Called when we are done cleaning up
 *				       from serverdown
 *	@devdata: visorhba instance being torn down
 *
 *	Called when we are done cleaning up from serverdown: stop the
 *	response-processing thread, then fail every still-pending request.
 *	Returns void when finished cleaning up
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end) -- must happen BEFORE we walk the pending table so
	 * the thread cannot complete entries underneath us
	 */
	visor_thread_stop(devdata->thread);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			/* ordinary I/O: complete it back to the midlayer
			 * as a reset so it gets retried/failed upward
			 */
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			/* taskmgmt: wake the error handler with FAILED */
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			/* empty slot; nothing to fail */
			break;
		}
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	/* mark the transition finished: down, no longer "changing" */
	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
785
786 /**
787  *      visorhba_serverdown - Got notified that the IOVM is down
788  *      @devdata: visorhba that is being serviced by downed IOVM.
789  *
790  *      Something happened to the IOVM, return immediately and
791  *      schedule work cleanup work.
792  *      Return SUCCESS or EINVAL
793  */
794 static int visorhba_serverdown(struct visorhba_devdata *devdata)
795 {
796         if (!devdata->serverdown && !devdata->serverchangingstate) {
797                 devdata->serverchangingstate = true;
798                 visorhba_serverdown_complete(devdata);
799         } else if (devdata->serverchangingstate) {
800                 return -EINVAL;
801         }
802         return 0;
803 }
804
/**
 *	do_scsi_linuxstat - scsi command returned linuxstat
 *	@cmdrsp: response from IOVM
 *	@scsicmd: Command issued.
 *
 *	Copies back the sense data and bumps per-disk error counters.
 *	Don't log errors for disk-not-present inquiries.
 *	Returns void
 */
static void
do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	struct visorhba_devdata *devdata;
	struct visordisk_info *vdisk;
	struct scsi_device *scsidev;

	scsidev = scsicmd->device;
	/* propagate sense data from the IOVM response to the midlayer */
	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);

	/* Do not log errors for disk-not-present inquiries */
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
	    (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
		return;
	/* Okay see what our error_count is here.... */
	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
			atomic_inc(&vdisk->error_count);
			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
		}
	}
}
837
838 static int set_no_disk_inquiry_result(unsigned char *buf,
839                                       size_t len, bool is_lun0)
840 {
841         if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
842                 return -EINVAL;
843         memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
844         buf[2] = SCSI_SPC2_VER;
845         if (is_lun0) {
846                 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
847                 buf[3] = DEV_HISUPPORT;
848         } else {
849                 buf[0] = DEV_NOT_CAPABLE;
850         }
851         buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
852         strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
853         return 0;
854 }
855
856 /**
857  *      do_scsi_nolinuxstat - scsi command didn't have linuxstat
858  *      @cmdrsp: response from IOVM
859  *      @scsicmd: Command issued.
860  *
861  *      Handle response when no linuxstat was returned
862  *      Returns void
863  */
864 static void
865 do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
866 {
867         struct scsi_device *scsidev;
868         unsigned char buf[36];
869         struct scatterlist *sg;
870         unsigned int i;
871         char *this_page;
872         char *this_page_orig;
873         int bufind = 0;
874         struct visordisk_info *vdisk;
875         struct visorhba_devdata *devdata;
876
877         scsidev = scsicmd->device;
878         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
879             (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
880                 if (cmdrsp->scsi.no_disk_result == 0)
881                         return;
882
883                 /* Linux scsi code wants a device at Lun 0
884                  * to issue report luns, but we don't want
885                  * a disk there so we'll present a processor
886                  * there.
887                  */
888                 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
889                                            scsidev->lun == 0);
890
891                 if (scsi_sg_count(scsicmd) == 0) {
892                         memcpy(scsi_sglist(scsicmd), buf,
893                                cmdrsp->scsi.bufflen);
894                         return;
895                 }
896
897                 sg = scsi_sglist(scsicmd);
898                 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
899                         this_page_orig = kmap_atomic(sg_page(sg + i));
900                         this_page = (void *)((unsigned long)this_page_orig |
901                                              sg[i].offset);
902                         memcpy(this_page, buf + bufind, sg[i].length);
903                         kunmap_atomic(this_page_orig);
904                 }
905         } else {
906                 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
907                 for_each_vdisk_match(vdisk, devdata, scsidev) {
908                         if (atomic_read(&vdisk->ios_threshold) > 0) {
909                                 atomic_dec(&vdisk->ios_threshold);
910                                 if (atomic_read(&vdisk->ios_threshold) == 0)
911                                         atomic_set(&vdisk->error_count, 0);
912                         }
913                 }
914         }
915 }
916
917 /**
918  *      complete_scsi_command - complete a scsi command
919  *      @uiscmdrsp: Response from Service Partition
920  *      @scsicmd: The scsi command
921  *
922  *      Response returned by the Service Partition, finish it and send
923  *      completion to the scsi midlayer.
924  *      Returns void.
925  */
926 static void
927 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
928 {
929         /* take what we need out of cmdrsp and complete the scsicmd */
930         scsicmd->result = cmdrsp->scsi.linuxstat;
931         if (cmdrsp->scsi.linuxstat)
932                 do_scsi_linuxstat(cmdrsp, scsicmd);
933         else
934                 do_scsi_nolinuxstat(cmdrsp, scsicmd);
935
936         scsicmd->scsi_done(scsicmd);
937 }
938
939 /**
940  *      drain_queue - pull responses out of iochannel
941  *      @cmdrsp: Response from the IOSP
942  *      @devdata: device that owns this iochannel
943  *
944  *      Pulls responses out of the iochannel and process the responses.
945  *      Restuns void
946  */
947 static void
948 drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
949 {
950         struct scsi_cmnd *scsicmd;
951
952         while (1) {
953                 if (!visorchannel_signalremove(devdata->dev->visorchannel,
954                                                IOCHAN_FROM_IOPART,
955                                                cmdrsp))
956                         break; /* queue empty */
957
958                 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
959                         /* scsicmd location is returned by the
960                          * deletion
961                          */
962                         scsicmd = del_scsipending_ent(devdata,
963                                                       cmdrsp->scsi.handle);
964                         if (!scsicmd)
965                                 break;
966                         /* complete the orig cmd */
967                         complete_scsi_command(cmdrsp, scsicmd);
968                 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
969                         if (!del_scsipending_ent(devdata,
970                                                  cmdrsp->scsitaskmgmt.handle))
971                                 break;
972                         complete_taskmgmt_command(&devdata->idr, cmdrsp,
973                                                   cmdrsp->scsitaskmgmt.result);
974                 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
975                         dev_err_once(&devdata->dev->device,
976                                      "ignoring unsupported NOTIFYGUEST\n");
977                 /* cmdrsp is now available for re-use */
978         }
979 }
980
981 /**
982  *      process_incoming_rsps - Process responses from IOSP
983  *      @v: void pointer to visorhba_devdata
984  *
985  *      Main function for the thread that processes the responses
986  *      from the IO Service Partition. When the queue is empty, wait
987  *      to check to see if it is full again.
988  */
989 static int process_incoming_rsps(void *v)
990 {
991         struct visorhba_devdata *devdata = v;
992         struct uiscmdrsp *cmdrsp = NULL;
993         const int size = sizeof(*cmdrsp);
994
995         cmdrsp = kmalloc(size, GFP_ATOMIC);
996         if (!cmdrsp)
997                 return -ENOMEM;
998
999         while (1) {
1000                 if (kthread_should_stop())
1001                         break;
1002                 wait_event_interruptible_timeout(
1003                         devdata->rsp_queue, (atomic_read(
1004                                              &devdata->interrupt_rcvd) == 1),
1005                                 msecs_to_jiffies(devdata->thread_wait_ms));
1006                 /* drain queue */
1007                 drain_queue(cmdrsp, devdata);
1008         }
1009         kfree(cmdrsp);
1010         return 0;
1011 }
1012
1013 /**
1014  *      visorhba_pause - function to handle visorbus pause messages
1015  *      @dev: device that is pausing.
1016  *      @complete_func: function to call when finished
1017  *
1018  *      Something has happened to the IO Service Partition that is
1019  *      handling this device. Quiet this device and reset commands
1020  *      so that the Service Partition can be corrected.
1021  *      Returns SUCCESS
1022  */
1023 static int visorhba_pause(struct visor_device *dev,
1024                           visorbus_state_complete_func complete_func)
1025 {
1026         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1027
1028         visorhba_serverdown(devdata);
1029         complete_func(dev, 0);
1030         return 0;
1031 }
1032
1033 /**
1034  *      visorhba_resume - function called when the IO Service Partition is back
1035  *      @dev: device that is pausing.
1036  *      @complete_func: function to call when finished
1037  *
1038  *      Yay! The IO Service Partition is back, the channel has been wiped
1039  *      so lets re-establish connection and start processing responses.
1040  *      Returns 0 on success, error on failure.
1041  */
1042 static int visorhba_resume(struct visor_device *dev,
1043                            visorbus_state_complete_func complete_func)
1044 {
1045         struct visorhba_devdata *devdata;
1046
1047         devdata = dev_get_drvdata(&dev->device);
1048         if (!devdata)
1049                 return -EINVAL;
1050
1051         if (devdata->serverdown && !devdata->serverchangingstate)
1052                 devdata->serverchangingstate = true;
1053
1054         devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1055                                              "vhba_incming");
1056
1057         devdata->serverdown = false;
1058         devdata->serverchangingstate = false;
1059
1060         return 0;
1061 }
1062
/**
 *	visorhba_probe - device has been discovered, do acquire
 *	@dev: visor_device that was discovered
 *
 *	A new HBA was discovered, do the initial connections of it:
 *	allocate and register a scsi host sized from the channel's
 *	limits, set up debugfs, enable channel polling, and start the
 *	response-processing thread. The goto labels unwind in exact
 *	reverse order of acquisition.
 *	Return 0 on success, otherwise error.
 */
static int visorhba_probe(struct visor_device *dev)
{
	struct Scsi_Host *scsihost;
	struct vhba_config_max max;
	struct visorhba_devdata *devdata = NULL;
	int err, channel_offset;
	u64 features;

	scsihost = scsi_host_alloc(&visorhba_driver_template,
				   sizeof(*devdata));
	if (!scsihost)
		return -ENODEV;

	/* read the channel's vhba limits so the scsi host can be sized
	 * to what the IO Service Partition supports
	 */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vhba.max);
	err = visorbus_read_channel(dev, channel_offset, &max,
				    sizeof(struct vhba_config_max));
	if (err < 0)
		goto err_scsi_host_put;

	scsihost->max_id = (unsigned int)max.max_id;
	scsihost->max_lun = (unsigned int)max.max_lun;
	scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
	/* max_io_size is in bytes; max_sectors counts 512-byte sectors */
	scsihost->max_sectors =
	    (unsigned short)(max.max_io_size >> 9);
	scsihost->sg_tablesize =
	    (unsigned short)(max.max_io_size / PAGE_SIZE);
	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
		scsihost->sg_tablesize = MAX_PHYS_INFO;
	err = scsi_add_host(scsihost, &dev->device);
	if (err < 0)
		goto err_scsi_host_put;

	/* hostdata was allocated along with the Scsi_Host above */
	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	devdata->dev = dev;
	dev_set_drvdata(&dev->device, devdata);

	devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
						  visorhba_debugfs_dir);
	if (!devdata->debugfs_dir) {
		err = -ENOMEM;
		goto err_scsi_remove_host;
	}
	devdata->debugfs_info =
		debugfs_create_file("info", S_IRUSR | S_IRGRP,
				    devdata->debugfs_dir, devdata,
				    &info_debugfs_fops);
	if (!devdata->debugfs_info) {
		err = -ENOMEM;
		goto err_debugfs_dir;
	}

	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->privlock);
	devdata->serverdown = false;
	devdata->serverchangingstate = false;
	devdata->scsihost = scsihost;

	/* advertise that this guest polls the channel rather than
	 * relying on interrupts from the IO partition
	 */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;
	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;

	idr_init(&devdata->idr);

	/* NOTE(review): visor_thread_start()'s return value is not
	 * checked - confirm whether a failed start should unwind probe
	 */
	devdata->thread_wait_ms = 2;
	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
					     "vhba_incoming");

	scsi_scan_host(scsihost);

	return 0;

err_debugfs_info:
	debugfs_remove(devdata->debugfs_info);

err_debugfs_dir:
	debugfs_remove_recursive(devdata->debugfs_dir);

err_scsi_remove_host:
	scsi_remove_host(scsihost);

err_scsi_host_put:
	scsi_host_put(scsihost);
	return err;
}
1161
1162 /**
1163  *      visorhba_remove - remove a visorhba device
1164  *      @dev: Device to remove
1165  *
1166  *      Removes the visorhba device.
1167  *      Returns void.
1168  */
1169 static void visorhba_remove(struct visor_device *dev)
1170 {
1171         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1172         struct Scsi_Host *scsihost = NULL;
1173
1174         if (!devdata)
1175                 return;
1176
1177         scsihost = devdata->scsihost;
1178         visor_thread_stop(devdata->thread);
1179         scsi_remove_host(scsihost);
1180         scsi_host_put(scsihost);
1181
1182         idr_destroy(&devdata->idr);
1183
1184         dev_set_drvdata(&dev->device, NULL);
1185         debugfs_remove(devdata->debugfs_info);
1186         debugfs_remove_recursive(devdata->debugfs_dir);
1187 }
1188
1189 /**
1190  *      visorhba_init           - driver init routine
1191  *
1192  *      Initialize the visorhba driver and register it with visorbus
1193  *      to handle s-Par virtual host bus adapter.
1194  */
1195 static int visorhba_init(void)
1196 {
1197         int rc = -ENOMEM;
1198
1199         visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1200         if (!visorhba_debugfs_dir)
1201                 return -ENOMEM;
1202
1203         rc = visorbus_register_visor_driver(&visorhba_driver);
1204         if (rc)
1205                 goto cleanup_debugfs;
1206
1207         return 0;
1208
1209 cleanup_debugfs:
1210         debugfs_remove_recursive(visorhba_debugfs_dir);
1211
1212         return rc;
1213 }
1214
/**
 *	visorhba_exit	- driver exit routine
 *
 *	Unregister driver from the bus, then remove the debugfs tree.
 */
static void visorhba_exit(void)
{
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1225
1226 module_init(visorhba_init);
1227 module_exit(visorhba_exit);
1228
1229 MODULE_AUTHOR("Unisys");
1230 MODULE_LICENSE("GPL");
1231 MODULE_DESCRIPTION("s-Par hba driver");