/*
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards compliant form such
 * as gzipped tar and not one requiring a proprietary or patent
 * encumbered format.
 *
 * Fixes/additions:
 *	Multiple device handling error fixes,
 *	Added a queue depth.
 *	FC920 has an rmw bug. Don't OR in the end marker.
 *	Removed queue walk, fixed for 64bitness.
 *	Rewrote much of the code over time
 *	Added indirect block lists
 *	Handle 64K limits on many controllers
 *	Don't use indirects on the Promise (breaks)
 *	Heavily chop down the queue depths
 *	Independent queues per IOP
 *	Support for dynamic device creation/deletion
 *	Support for larger I/Os through merge* functions
 *	(taken from DAC960 driver)
 *	Boji T Kannanthanam:
 *		Set the I2O Block devices to be detected in increasing
 *		order of TIDs during boot.
 *		Search and set the I2O block device that we boot off
 *		from as the first device to be claimed (as /dev/i2o/hda)
 *		Properly attach/detach I2O gendisk structure from the
 *		system gendisk list. The I2O block devices now appear in
 *		/proc/partitions.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Minor bugfixes for 2.6.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2o.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>

#include "i2o_block.h"
#define OSM_NAME	"block-osm"
#define OSM_VERSION	"1.325"
#define OSM_DESCRIPTION	"I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;
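
/*
 * The mempool keeps a minimum number of request structs preallocated, so
 * allocations for in-flight I/O can make forward progress even under
 * memory pressure.
 */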
/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};
/**
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device, which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
	blk_cleanup_queue(dev->gd->queue);

	put_disk(dev->gd);

	kfree(dev);
}
/**
 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Remove gendisk from system and free all allocated memory.
 *
 * Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

	osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

	del_gendisk(i2o_blk_dev->gd);

	dev_set_drvdata(dev, NULL);

	i2o_device_claim_release(i2o_dev);

	i2o_block_device_free(i2o_blk_dev);

	return 0;
}
/**
 * i2o_block_device_flush - Flush all dirty data of I2O device dev
 * @dev: I2O device which should be flushed
 *
 * Flushes all dirty data on device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(60 << 16);
	osm_debug("Flushing...\n");

	return i2o_msg_post_wait(dev->iop, msg, 60);
}
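
/*
 * Each of the simple block commands below is built the same way: head[0]
 * carries the message size and SG list offset, head[1] packs the command
 * code with the initiator (HOST_TID) and target TIDs, and the body words
 * carry command-specific arguments.
 */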
/**
 * i2o_block_device_mount - Mount (load) the media of device dev
 * @dev: I2O device which should receive the mount request
 * @media_id: Media Identifier
 *
 * Load the media into the drive. The identifier should be set to -1,
 * because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	msg->body[1] = cpu_to_le32(0x00000000);
	osm_debug("Mounting...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
}
/**
 * i2o_block_device_lock - Locks the media of device dev
 * @dev: I2O device which should receive the lock request
 * @media_id: Media Identifier
 *
 * Lock media of device dev to prevent removal. The media identifier
 * should be set to -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	osm_debug("Locking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
}
/**
 * i2o_block_device_unlock - Unlocks the media of device dev
 * @dev: I2O device which should receive the unlock request
 * @media_id: Media Identifier
 *
 * Unlocks the media in device dev. The media identifier should be set to
 * -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(media_id);
	osm_debug("Unlocking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
}
/**
 * i2o_block_device_power - Power management for device dev
 * @dev: I2O device which should receive the power management request
 * @op: Operation to send
 *
 * Send a power management request to the device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
	struct i2o_device *i2o_dev = dev->i2o_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_message *msg;
	int rc;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
			i2o_dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(op << 24);
	osm_debug("Power...\n");

	rc = i2o_msg_post_wait(c, msg, 60);
	if (!rc)
		dev->power = op;

	return rc;
}
/**
 * i2o_block_request_alloc - Allocate an I2O block request struct
 *
 * Allocates an I2O block request struct and initializes the list.
 *
 * Returns an i2o_block_request pointer on success or negative error code
 * on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
	struct i2o_block_request *ireq;

	/* GFP_ATOMIC: this runs from the request prep function, which is
	   called with the queue lock held */
	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
	if (!ireq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ireq->queue);
	sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS);

	return ireq;
}
/**
 * i2o_block_request_free - Frees an I2O block request
 * @ireq: I2O block request which should be freed
 *
 * Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
	mempool_free(ireq, i2o_blk_req_pool.pool);
}
/**
 * i2o_block_sglist_alloc - Allocate the SG list and map it
 * @c: I2O controller to which the request belongs
 * @ireq: I2O block request
 * @mptr: message body pointer
 *
 * Builds the SG list and maps it so it is accessible by the controller.
 *
 * Returns 0 on failure or 1 on success.
 */
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
					 struct i2o_block_request *ireq,
					 u32 **mptr)
{
	int nents;
	enum dma_data_direction direction;

	ireq->dev = &c->pdev->dev;
	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	ireq->sg_nents = nents;

	return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
}
/**
 * i2o_block_sglist_free - Frees the SG list
 * @ireq: I2O block request from which the SG list should be freed
 *
 * Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
	enum dma_data_direction direction;

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
}
/**
 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
 * @q: request queue for the request
 * @req: the request to prepare
 *
 * Allocate the necessary i2o_block_request struct and connect it to
 * the request. This is needed so that we do not lose the SG list later on.
 *
 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	if (unlikely(!i2o_blk_dev)) {
		osm_err("block device already removed\n");
		return BLKPREP_KILL;
	}

	/* connect the i2o_block_request to the request */
	if (!req->special) {
		ireq = i2o_block_request_alloc();
		if (IS_ERR(ireq)) {
			osm_debug("unable to allocate i2o_block_request!\n");
			return BLKPREP_DEFER;
		}

		ireq->i2o_blk_dev = i2o_blk_dev;
		req->special = ireq;
		ireq->req = req;
	}

	/* do not come back here: REQ_DONTPREP keeps the block layer from
	   calling the prep function again if the request is requeued */
	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
/**
 * i2o_block_delayed_request_fn - delayed request queue function
 * @work: the delayed request with the queue to start
 *
 * If the request queue is stopped for a disk, and there is no open
 * request, a new event is created, which calls this function to start
 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
 * be started again.
 */
static void i2o_block_delayed_request_fn(struct work_struct *work)
{
	struct i2o_block_delayed_request *dreq =
		container_of(work, struct i2o_block_delayed_request,
			     work.work);
	struct request_queue *q = dreq->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	kfree(dreq);
}
/**
 * i2o_block_end_request - Post-processing of completed commands
 * @req: request which should be completed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Mark the request as complete. The lock must not be held when entering.
 */
static void i2o_block_end_request(struct request *req, int error,
				  int nr_bytes)
{
	struct i2o_block_request *ireq = req->special;
	struct i2o_block_device *dev = ireq->i2o_blk_dev;
	struct request_queue *q = req->q;
	unsigned long flags;

	if (blk_end_request(req, error, nr_bytes))
		/* bytes remain: on an error path, fail the rest as well */
		if (error)
			blk_end_request_all(req, -EIO);

	spin_lock_irqsave(q->queue_lock, flags);

	if (likely(dev)) {
		dev->open_queue_depth--;
		list_del(&ireq->queue);
	}

	blk_start_queue(q);

	spin_unlock_irqrestore(q->queue_lock, flags);

	i2o_block_sglist_free(ireq);
	i2o_block_request_free(ireq);
}
/**
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
			   struct i2o_message *msg)
{
	struct request *req;
	int error = 0;

	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
	if (unlikely(!req)) {
		osm_err("NULL reply received!\n");
		return -1;
	}

	/*
	 * Let's see what is cooking. We stuffed the
	 * request in the context.
	 */

	if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
		u32 status = le32_to_cpu(msg->body[0]);
		/*
		 * Device not ready means two things. One is that the
		 * thing went offline (but not removable media).
		 *
		 * The second is that you have a SuperTrak 100 and the
		 * firmware got constipated. Unlike standard i2o card
		 * setups the supertrak returns an error rather than
		 * blocking for the timeout in these cases.
		 *
		 * Don't stick a supertrak100 into cache aggressive modes
		 */
		osm_err("TID %03x error status: 0x%02x, detailed status: "
			"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
			status >> 24, status & 0xffff);

		error = -EIO;
	}

	i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));

	return 1;
}
static void i2o_block_event(struct work_struct *work)
{
	struct i2o_event *evt = container_of(work, struct i2o_event, work);
	osm_debug("event received\n");
	kfree(evt);
}
/*
 * SCSI-CAM for ioctl geometry mapping
 * Duplicated with SCSI - this should be moved into somewhere common
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size            | Sectors | Heads | Cylinders
 * ---------------------+---------+-------+--------------------
 * 1 < X <= 528M        | 63      | 16    | X/(63 * 16 * 512)
 * 528M < X <= 1G       | 63      | 32    | X/(63 * 32 * 512)
 * 1G < X <= 21G        | 63      | 64    | X/(63 * 64 * 512)
 * 21G < X <= 42G       | 63      | 128   | X/(63 * 128 * 512)
 * 42G < X              | 63      | 255   | X/(63 * 255 * 512)
 */
#define BLOCK_SIZE_528M		1081344
#define BLOCK_SIZE_1G		2097152
#define BLOCK_SIZE_21G		4403200
#define BLOCK_SIZE_42G		8806400
#define BLOCK_SIZE_84G		17612800
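
/*
 * The thresholds above are in 512-byte sectors: 1081344 sectors = 528 MiB
 * and 2097152 sectors = 1 GiB. For example, a 1 GiB disk gets 63 sectors
 * and 32 heads, so cylinders = 2097152 / (32 * 63) = 1040.
 */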
static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
}
/**
 * i2o_block_open - Open the block device
 * @bdev: block device being opened
 * @mode: file open mode
 *
 * Power up the device, mount and lock the media. This function is called
 * if the block device is opened for access.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct block_device *bdev, fmode_t mode)
{
	struct i2o_block_device *dev = bdev->bd_disk->private_data;

	if (!dev->i2o_dev)
		return -ENODEV;

	if (dev->power > 0x1f)
		i2o_block_device_power(dev, 0x02);

	i2o_block_device_mount(dev->i2o_dev, -1);

	i2o_block_device_lock(dev->i2o_dev, -1);

	osm_debug("Ready.\n");

	return 0;
}
/**
 * i2o_block_release - Release the I2O block device
 * @disk: gendisk device being released
 * @mode: file open mode
 *
 * Unlock and unmount the media, and power down the device. Gets called
 * if the block device is closed.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct gendisk *disk, fmode_t mode)
{
	struct i2o_block_device *dev = disk->private_data;
	u8 operation;

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappears while
	 * it's in use, and then the application tries to release
	 * it. ex: Unmounting a deleted RAID volume at reboot.
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if (!dev->i2o_dev)
		return 0;

	i2o_block_device_flush(dev->i2o_dev);

	i2o_block_device_unlock(dev->i2o_dev, -1);

	if (dev->flags & (1 << 3 | 1 << 4))	/* Removable */
		operation = 0x21;
	else
		operation = 0x24;

	i2o_block_device_power(dev, operation);

	return 0;
}
static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	i2o_block_biosparam(get_capacity(bdev->bd_disk),
			    &geo->cylinders, &geo->heads, &geo->sectors);
	return 0;
}
/**
 * i2o_block_ioctl - Issue device specific ioctl calls.
 * @bdev: block device being opened
 * @mode: file open mode
 * @cmd: ioctl command
 * @arg: arg
 *
 * Handles ioctl request for the block device.
 *
 * Return 0 on success or negative error on failure.
 */
static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;
	int ret = -ENOTTY;

	/* Anyone capable of this syscall can do *real bad* things */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BLKI2OGRSTRAT:
		ret = put_user(dev->rcache, (int __user *)arg);
		break;
	case BLKI2OGWSTRAT:
		ret = put_user(dev->wcache, (int __user *)arg);
		break;
	case BLKI2OSRSTRAT:
		ret = -EINVAL;
		if (arg < 0 || arg > CACHE_SMARTFETCH)
			break;
		dev->rcache = arg;
		ret = 0;
		break;
	case BLKI2OSWSTRAT:
		ret = -EINVAL;
		if (arg != 0
		    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
			break;
		dev->wcache = arg;
		ret = 0;
		break;
	}
	return ret;
}
/**
 * i2o_block_media_changed - Have we seen a media change?
 * @disk: gendisk which should be verified
 *
 * Verifies if the media has changed.
 *
 * Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
	struct i2o_block_device *p = disk->private_data;

	if (p->media_change_flag) {
		p->media_change_flag = 0;
		return 1;
	}
	return 0;
}
/**
 * i2o_block_transfer - Transfer a request to/from the I2O controller
 * @req: the request which should be transferred
 *
 * This function converts the request into an I2O message. The necessary
 * DMA buffers are allocated and, after everything is set up, the message
 * is posted to the I2O controller. No cleanup is done by this function.
 * It is done on the interrupt side when the reply arrives.
 *
 * Return 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
	struct i2o_block_device *dev = req->rq_disk->private_data;
	struct i2o_controller *c;
	u32 tid = dev->i2o_dev->lct_data.tid;
	struct i2o_message *msg;
	u32 *mptr;
	struct i2o_block_request *ireq = req->special;
	u32 tcntxt;
	u32 sgl_offset = SGL_OFFSET_8;
	u32 ctl_flags = 0x00000000;
	int rc;
	u32 cmd;
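
	/*
	 * The scatter-gather list normally starts at message word 8
	 * (SGL_OFFSET_8); for the Adaptec private SCSI_EXEC commands below
	 * it moves to word 12 to make room for the larger private header.
	 */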
	if (unlikely(!dev->i2o_dev)) {
		osm_err("transfer to removed drive\n");
		rc = -ENODEV;
		goto exit;
	}

	c = dev->i2o_dev->iop;

	msg = i2o_msg_get(c);
	if (IS_ERR(msg)) {
		rc = PTR_ERR(msg);
		goto exit;
	}

	tcntxt = i2o_cntxt_list_add(c, req);
	if (!tcntxt) {
		rc = -ENOMEM;
		goto nop_msg;
	}

	msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
	msg->u.s.tcntxt = cpu_to_le32(tcntxt);

	mptr = &msg->body[0];

	if (rq_data_dir(req) == READ) {
		cmd = I2O_CMD_BLOCK_READ << 24;

		switch (dev->rcache) {
		case CACHE_PREFETCH:
			ctl_flags = 0x201F0008;
			break;

		case CACHE_SMARTFETCH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x201F0008;
			else
				ctl_flags = 0x001F0000;
			break;
		}
	} else {
		cmd = I2O_CMD_BLOCK_WRITE << 24;

		switch (dev->wcache) {
		case CACHE_WRITETHROUGH:
			ctl_flags = 0x001F0008;
			break;
		case CACHE_WRITEBACK:
			ctl_flags = 0x001F0010;
			break;
		case CACHE_SMARTBACK:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			break;
		case CACHE_SMARTTHROUGH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			break;
		}
	}
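
	/*
	 * The ctl_flags values above are control-flag words from the I2O
	 * Block specification; they tell the controller how aggressively it
	 * may cache or prefetch this transfer, according to the per-device
	 * rcache/wcache policy selected through the ioctls above.
	 */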
#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec) {
		u8 cmd[10];
		u32 scsi_flags;
		u16 hwsec;

		hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
		memset(cmd, 0, 10);

		sgl_offset = SGL_OFFSET_12;

		msg->u.head[1] =
		    cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);

		*mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
		*mptr++ = cpu_to_le32(tid);

		/*
		 * ENABLE_DISCONNECT
		 * SIMPLE_TAG
		 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
		 */
		if (rq_data_dir(req) == READ) {
			cmd[0] = READ_10;
			scsi_flags = 0x60a0000a;
		} else {
			cmd[0] = WRITE_10;
			scsi_flags = 0xa0a0000a;
		}

		*mptr++ = cpu_to_le32(scsi_flags);

		*((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
		*((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);

		memcpy(mptr, cmd, 10);
		mptr += 4;
		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
	} else
#endif
	{
		msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
		*mptr++ = cpu_to_le32(ctl_flags);
		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
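		/*
		 * Split the 64-bit byte offset of the request into two
		 * little-endian 32-bit words: low half first, then the
		 * high half of (sector position << KERNEL_SECTOR_SHIFT).
		 */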
		*mptr++ =
		    cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
		*mptr++ =
		    cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
	}

	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
		rc = -ENOMEM;
		goto context_remove;
	}

	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);

	list_add_tail(&ireq->queue, &dev->open_queue);
	dev->open_queue_depth++;

	i2o_msg_post(c, msg);

	return 0;

      context_remove:
	i2o_cntxt_list_remove(c, req);

      nop_msg:
	i2o_msg_nop(c, msg);

      exit:
	return rc;
}
/**
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and if no error
 * occurs dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs requeue the request.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while (!blk_queue_plugged(q)) {
		req = blk_peek_request(q);
		if (!req)
			break;

		if (req->cmd_type == REQ_TYPE_FS) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
				if (!i2o_block_transfer(req)) {
					blk_start_request(req);
					continue;
				} else
					osm_info("transfer error\n");
			}

			if (queue_depth)
				break;

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_DELAYED_WORK(&dreq->work,
					  i2o_block_delayed_request_fn);

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else {
			/* non-fs requests are not supported; fail them */
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
		}
	}
}
/* I2O Block device operations definition */
static const struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.compat_ioctl = i2o_block_ioctl,
	.getgeo = i2o_block_getgeo,
	.media_changed = i2o_block_media_changed
};
/**
 * i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 * Allocate memory for the i2o_block_device struct, gendisk and request
 * queue and initialize them as far as no additional information is needed.
 *
 * Returns a pointer to the allocated I2O Block device on success or a
 * negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
	struct i2o_block_device *dev;
	struct gendisk *gd;
	struct request_queue *queue;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		osm_err("Insufficient memory to allocate I2O Block disk.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
	gd = alloc_disk(16);
	if (!gd) {
		osm_err("Insufficient memory to allocate gendisk.\n");
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
	if (!queue) {
		osm_err("Insufficient memory to allocate request queue.\n");
		put_disk(gd);
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

	gd->major = I2O_MAJOR;
	gd->queue = queue;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;

	dev->gd = gd;

	return dev;
}
/**
 * i2o_block_probe - verify if dev is an I2O Block device and install it
 * @dev: device to verify if it is an I2O Block device
 *
 * We only verify if the user_tid of the device is 0xfff and then install
 * the device. Otherwise it is used by some other device (e.g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_block_device *i2o_blk_dev;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u16 body_size = 4;
	u16 power;
	unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec)
		body_size = 8;
#endif

	if (c->limit_sectors)
		max_sectors = I2O_MAX_SECTORS_LIMITED;
	else
		max_sectors = I2O_MAX_SECTORS;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_hw_sectors(queue, max_sectors);
	blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));

	osm_debug("max sectors = %d\n", queue->max_sectors);
	osm_debug("phys segments = %d\n", queue->max_phys_segments);
	osm_debug("max hw segments = %d\n", queue->max_hw_segments);

	/*
	 * Ask for the current media data. If that isn't supported
	 * then we ask for the device capacity data
	 */
	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
		blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
	} else
		osm_warn("unable to get blocksize of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
		set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
	} else
		osm_warn("could not get size of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
		i2o_blk_dev->power = power;

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	return 0;

      claim_release:
	i2o_device_claim_release(i2o_dev);

      exit:
	return rc;
}
/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		   .probe = i2o_block_probe,
		   .remove = i2o_block_remove,
		   },
};
/**
 * i2o_block_init - Block OSM initialization function
 *
 * Allocate the slab and mempool for request structs, register the i2o_block
 * block device and finally register the Block OSM in the I2O core.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
	int rc;
	int size;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");
		rc = -ENOMEM;
		goto exit;
	}

	i2o_blk_req_pool.pool =
		mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
					 i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");
		rc = -ENOMEM;
		goto free_slab;
	}

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
	if (rc < 0) {
		osm_err("unable to register block device\n");
		goto free_mempool;
	}

	osm_info("registered device at major %d\n", I2O_MAJOR);

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
	if (rc) {
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;
	}

	return 0;

      unregister_blkdev:
	unregister_blkdev(I2O_MAJOR, "i2o_block");

      free_mempool:
	mempool_destroy(i2o_blk_req_pool.pool);

      free_slab:
	kmem_cache_destroy(i2o_blk_req_pool.slab);

      exit:
	return rc;
}
/**
 * i2o_block_exit - Block OSM exit function
 *
 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
 * and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);
}
MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);