block: simplify and export blk_rq_append_bio
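Several related block-layer cleanups are visible in this diff of block/blk-core.c: rw_is_sync() now takes the operation and the flags as separate arguments; REQ_FLUSH is renamed REQ_PREFLUSH; secure discard becomes a first-class operation (REQ_OP_SECURE_ERASE) and the unsupported-op checks collapse into one switch with a shared not_supported label; generic request-field initialization moves from blk_rq_set_block_pc() into blk_old_get_request(); blk_rq_append_bio() drops its redundant queue argument and is exported; and blk_poll() is exported for use outside the block core. The short sketches between hunks below illustrate the helpers and callers these changes imply.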
diff --git a/block/blk-core.c b/block/blk-core.c
index 090e55d..a223018 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -962,7 +962,7 @@ static void __freed_request(struct request_list *rl, int sync)
 static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
        struct request_queue *q = rl->q;
-       int sync = rw_is_sync(op | flags);
+       int sync = rw_is_sync(op, flags);
 
        q->nr_rqs[sync]--;
        rl->count[sync]--;
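The change above is mechanical fallout from splitting the operation out of the flags word: rw_is_sync() now takes the op and the flags separately instead of an OR-ed value. A sketch of the two-argument helper implied by these call sites (the real body lives in include/linux/blkdev.h and is reconstructed here, so treat it as an assumption):

/*
 * Sketch only, reconstructed from the call sites above: a read op is
 * implicitly sync; a write is sync when REQ_SYNC is set in the flags.
 */
static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
        return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}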
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
         * Flush requests do not use the elevator so skip initialization.
         * This allows a request to share the flush and elevator data.
         */
-       if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
                return false;
 
        return true;
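REQ_FLUSH becomes REQ_PREFLUSH here and throughout the file; the rename makes explicit that the flag requests a cache flush before the request's own data is issued, while REQ_FUA still forces the data itself to stable media. A hypothetical submitter using the renamed flags (illustration only):

/* Hypothetical caller: issue a preflush + FUA write */
static blk_qc_t example_submit_barrier_write(struct bio *bio)
{
        bio->bi_rw |= REQ_PREFLUSH | REQ_FUA;
        return submit_bio(bio);
}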
@@ -1075,7 +1075,7 @@ static struct request *__get_request(struct request_list *rl, int op,
        struct elevator_type *et = q->elevator->type;
        struct io_context *ioc = rq_ioc(bio);
        struct io_cq *icq = NULL;
-       const bool is_sync = rw_is_sync(op | op_flags) != 0;
+       const bool is_sync = rw_is_sync(op, op_flags) != 0;
        int may_queue;
 
        if (unlikely(blk_queue_dying(q)))
@@ -1244,7 +1244,7 @@ static struct request *get_request(struct request_queue *q, int op,
                                   int op_flags, struct bio *bio,
                                   gfp_t gfp_mask)
 {
-       const bool is_sync = rw_is_sync(op | op_flags) != 0;
+       const bool is_sync = rw_is_sync(op, op_flags) != 0;
        DEFINE_WAIT(wait);
        struct request_list *rl;
        struct request *rq;
@@ -1294,10 +1294,15 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 
        spin_lock_irq(q->queue_lock);
        rq = get_request(q, rw, 0, NULL, gfp_mask);
-       if (IS_ERR(rq))
+       if (IS_ERR(rq)) {
                spin_unlock_irq(q->queue_lock);
-       /* q->queue_lock is unlocked at this point */
+               return rq;
+       }
 
+       /* q->queue_lock is unlocked at this point */
+       rq->__data_len = 0;
+       rq->__sector = (sector_t) -1;
+       rq->bio = rq->biotail = NULL;
        return rq;
 }
 
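Two things happen in this hunk: the error path now unlocks and returns early, and the generic field initialization (__data_len, __sector, bio/biotail) is done for every request returned here rather than only for callers that remembered to call blk_rq_set_block_pc(). A hypothetical caller after this change (the example name is an assumption):

/*
 * Hypothetical passthrough-style caller: the request arrives already
 * initialized, so blk_rq_set_block_pc() is needed only for cmd setup.
 */
static struct request *example_get_pc_request(struct request_queue *q)
{
        struct request *rq;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (IS_ERR(rq))
                return rq;

        blk_rq_set_block_pc(rq);
        return rq;
}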
@@ -1358,7 +1363,7 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
                int ret;
 
                blk_queue_bounce(q, &bounce_bio);
-               ret = blk_rq_append_bio(q, rq, bounce_bio);
+               ret = blk_rq_append_bio(rq, bounce_bio);
                if (unlikely(ret)) {
                        blk_put_request(rq);
                        return ERR_PTR(ret);
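This is the change the commit title refers to: blk_rq_append_bio() no longer takes a request_queue argument, since the queue is always reachable as rq->q, and the helper is exported so drivers can call it directly. A sketch of the simplified helper (it lives in block/blk-map.c; the body below is reconstructed and should be treated as an approximation):

int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
        if (!rq->bio) {
                blk_rq_bio_prep(rq->q, rq, bio);
        } else {
                if (!ll_back_merge_fn(rq->q, rq, bio))
                        return -EINVAL;

                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->__data_len += bio->bi_iter.bi_size;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);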
@@ -1377,9 +1382,6 @@ EXPORT_SYMBOL(blk_make_request);
 void blk_rq_set_block_pc(struct request *rq)
 {
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
-       rq->__data_len = 0;
-       rq->__sector = (sector_t) -1;
-       rq->bio = rq->biotail = NULL;
        memset(rq->__cmd, 0, sizeof(rq->__cmd));
 }
 EXPORT_SYMBOL(blk_rq_set_block_pc);
@@ -1736,7 +1738,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
                return BLK_QC_T_NONE;
        }
 
-       if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
                spin_lock_irq(q->queue_lock);
                where = ELEVATOR_INSERT_FLUSH;
                goto get_rq;
@@ -1853,7 +1855,7 @@ static void handle_bad_sector(struct bio *bio)
        char b[BDEVNAME_SIZE];
 
        printk(KERN_INFO "attempt to access beyond end of device\n");
-       printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+       printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
                        bdevname(bio->bi_bdev, b),
                        bio->bi_rw,
                        (unsigned long long)bio_end_sector(bio),
@@ -1968,25 +1970,30 @@ generic_make_request_checks(struct bio *bio)
         * drivers without flush support don't have to worry
         * about them.
         */
-       if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+       if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-               bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+               bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
                if (!nr_sectors) {
                        err = 0;
                        goto end_io;
                }
        }
 
-       if ((bio_op(bio) == REQ_OP_DISCARD) &&
-           (!blk_queue_discard(q) ||
-            ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
-               err = -EOPNOTSUPP;
-               goto end_io;
-       }
-
-       if (bio_op(bio) == REQ_OP_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
-               err = -EOPNOTSUPP;
-               goto end_io;
+       switch (bio_op(bio)) {
+       case REQ_OP_DISCARD:
+               if (!blk_queue_discard(q))
+                       goto not_supported;
+               break;
+       case REQ_OP_SECURE_ERASE:
+               if (!blk_queue_secure_erase(q))
+                       goto not_supported;
+               break;
+       case REQ_OP_WRITE_SAME:
+               if (!bdev_write_same(bio->bi_bdev))
+                       goto not_supported;
+               break;
+       default:
+               break;
        }
 
        /*
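The two ad-hoc checks become a switch over bio_op(): discard, secure erase, and write-same each verify queue support and branch to a common not_supported label. Secure erase is now its own operation rather than REQ_SECURE set on a discard, checked through a dedicated queue-flag helper, assumed to mirror the old blk_queue_secdiscard():

/* Assumed blkdev.h counterpart of the check above (sketch) */
#define blk_queue_secure_erase(q) \
        (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))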
@@ -2003,6 +2010,8 @@ generic_make_request_checks(struct bio *bio)
        trace_block_bio_queue(q, bio);
        return true;
 
+not_supported:
+       err = -EOPNOTSUPP;
 end_io:
        bio->bi_error = err;
        bio_endio(bio);
@@ -2161,7 +2170,7 @@ EXPORT_SYMBOL(submit_bio);
 static int blk_cloned_rq_check_limits(struct request_queue *q,
                                      struct request *rq)
 {
-       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
+       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }
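Limit checking now keys off the operation alone: req_op() extracts the op from the cloned request, and blk_queue_get_max_sectors() dispatches on it rather than on the whole flags word. A sketch of that dispatch, assuming the queue-limits fields of this era:

/* Sketch of the op-based dispatch (assumed blkdev.h helper) */
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                                                     int op)
{
        if (unlikely(op == REQ_OP_DISCARD))
                return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

        if (unlikely(op == REQ_OP_WRITE_SAME))
                return q->limits.max_write_same_sectors;

        return q->limits.max_sectors;
}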
@@ -2217,7 +2226,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
         */
        BUG_ON(blk_queued_rq(rq));
 
-       if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
+       if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
                where = ELEVATOR_INSERT_FLUSH;
 
        add_acct_request(q, rq, where);
@@ -3311,7 +3320,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                /*
                 * rq is already accounted, so use raw insert
                 */
-               if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
+               if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
                        __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
                else
                        __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
@@ -3378,6 +3387,7 @@ bool blk_poll(struct request_queue *q, blk_qc_t cookie)
 
        return false;
 }
+EXPORT_SYMBOL_GPL(blk_poll);
 
 #ifdef CONFIG_PM
 /**
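Exporting blk_poll() with EXPORT_SYMBOL_GPL lets code outside the block core spin for completion of a submitted bio using the blk_qc_t cookie returned by submit_bio(). A hypothetical busy-poll caller, signalling completion through bi_private (the endio scheme below is an assumption, for illustration only):

/* Hypothetical: endio callback clears bi_private to signal completion */
static void example_endio(struct bio *bio)
{
        WRITE_ONCE(bio->bi_private, NULL);
}

static void example_submit_and_poll(struct block_device *bdev,
                                    struct bio *bio)
{
        blk_qc_t cookie;

        bio->bi_private = current;
        bio->bi_end_io = example_endio;
        cookie = submit_bio(bio);

        /* busy-poll until example_endio() clears bi_private */
        while (READ_ONCE(bio->bi_private))
                blk_poll(bdev_get_queue(bdev), cookie);
}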