block: add a bi_error field to struct bio
block/blk-lib.c
/*
 * Generic block device helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

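/*
 * A bio_batch tracks a set of bios submitted by a single caller.  The
 * caller holds one reference in ->done and takes another per submitted
 * bio; bio_batch_end_io() drops one per completion and fires ->wait when
 * the count reaches zero.  After dropping its own reference, the caller
 * sleeps on ->wait only if bios are still in flight.
 */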
struct bio_batch {
	atomic_t		done;
	int			error;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio)
{
	struct bio_batch *bb = bio->bi_private;

	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
		bb->error = bio->bi_error;
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
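	/*
	 * Worked example (illustrative numbers): a device limit of 65535
	 * sectors with an 8-sector granularity is trimmed to
	 * 65535 - (65535 % 8) = 65528 sectors, so every maximal-sized bio
	 * ends on a granularity boundary.
	 */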
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
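		/*
		 * Worked example (illustrative numbers), assuming a split
		 * (req_sects < nr_sects): with sector = 1, req_sects = 8,
		 * granularity = 8 and alignment = 1, end_sect = 9 already
		 * has 9 % 8 == 1 and is left alone.  With alignment = 0
		 * instead, end_sect is rounded down to 8 and req_sects
		 * shrinks from 8 to 7.
		 */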

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
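
/*
 * A minimal usage sketch (example only, not part of this file): discard a
 * whole block device from process context.  The helper name and the use of
 * i_size_read() to size the device are illustrative assumptions.
 */
#if 0
static int example_discard_dev(struct block_device *bdev)
{
	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;

	/* May sleep; 0 in flags requests an ordinary (non-secure) discard. */
	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}
#endif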

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
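
/*
 * A minimal usage sketch (example only, not part of this file): replicate
 * the contents of a caller-supplied page across a sector range.  The page
 * must remain valid until the call returns, since every bio issued here
 * points at it.  The helper name is a made-up example.
 */
#if 0
static int example_write_same(struct block_device *bdev, struct page *page,
			      sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       page);
}
#endif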

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

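		/*
		 * Pack as many zero pages as fit into the bio;
		 * bio_add_page() returns the number of bytes it managed
		 * to add, so a short return means the bio is full and a
		 * new one is needed for the remainder.
		 */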
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded.  If the
 *  discard request fails, the discard flag is not set, or
 *  discard_zeroes_data is not supported, this function falls back to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them.  If the block device supports the WRITE SAME command,
 *  blkdev_issue_zeroout() uses it to clear the block range more
 *  efficiently.  Otherwise the zeroing is done with regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
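
/*
 * A minimal usage sketch (example only, not part of this file): zero a
 * range, letting the block layer satisfy it with a discard when the device
 * guarantees that discarded blocks read back as zeroes.  The helper name
 * is a made-up example.
 */
#if 0
static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);
}
#endif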