/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->data_len += bio->bi_size;
        }
        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
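
/*
 * Illustrative sketch of a blk_rq_append_bio() caller: map several kernel
 * buffers into bios and attach them to one request, letting the helper merge
 * into the last bio when the queue limits allow and chain a new bio
 * otherwise.  The function name and parameters are hypothetical, and error
 * unwinding of the partially built request is elided for brevity; only
 * bio_map_kern() and blk_rq_append_bio() are the real APIs exercised here.
 */
static int example_append_buffers(struct request_queue *q, struct request *rq,
                                  void *bufs[], unsigned int lens[], int n)
{
        struct bio *bio;
        int i, ret;

        for (i = 0; i < n; i++) {
                /* wrap the kernel buffer in a bio */
                bio = bio_map_kern(q, bufs[i], lens[i], GFP_KERNEL);
                if (IS_ERR(bio))
                        return PTR_ERR(bio);

                /* merge into rq or chain at rq->biotail */
                ret = blk_rq_append_bio(q, rq, bio);
                if (ret) {
                        bio_put(bio);   /* drop the bio we failed to attach */
                        return ret;
                }
        }
        return 0;
}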
static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             void __user *ubuf, unsigned int len)
{
        unsigned long uaddr;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * if the alignment requirement is satisfied, map in user pages for
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
        if (!(uaddr & queue_dma_alignment(q)) &&
            !(len & queue_dma_alignment(q)))
                bio = bio_map_user(q, NULL, uaddr, len, reading);
        else
                bio = bio_copy_user(q, uaddr, len, reading);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later, so we have to get a ref to prevent it from being freed
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}
/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    void __user *ubuf, unsigned long len)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;
        }

        /*
         * __blk_rq_map_user() copies the buffers if the starting address
         * or length isn't aligned.  As the copied buffer is always
         * page aligned, we know that there's enough room for padding.
         * Extend the last bio and update rq->data_len accordingly.  For
         * example, with a DMA alignment mask of 511 (512-byte alignment),
         * a 1000 byte transfer gets a pad_len of 24 and is rounded up to
         * 1024 bytes.
         *
         * On unmap, bio_uncopy_user() will use the unmodified
         * bio_map_data pointed to by bio->bi_private.
         */
        if (len & queue_dma_alignment(q)) {
                unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
                struct bio *bio = rq->biotail;

                bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
                bio->bi_size += pad_len;
        }

        rq->buffer = rq->data = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
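
/*
 * Illustrative sketch of the calling sequence documented above, roughly what
 * an SG_IO-style passthrough path does: allocate a request, map the user
 * buffer, execute, then unmap.  The function name, the cmd/cmd_len handling
 * and the timeout value are assumptions made for the example; note how
 * rq->bio is saved before submission so the original bio can be handed back
 * to blk_rq_unmap_user() afterwards, as I/O completion may change rq->bio.
 */
static int example_user_passthrough(struct request_queue *q,
                                    struct gendisk *disk,
                                    unsigned char *cmd, unsigned int cmd_len,
                                    void __user *ubuf, unsigned long len,
                                    int write)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        memcpy(rq->cmd, cmd, cmd_len);
        rq->cmd_len = cmd_len;
        rq->timeout = 60 * HZ;

        ret = blk_rq_map_user(q, rq, ubuf, len);
        if (ret) {
                blk_put_request(rq);
                return ret;
        }

        bio = rq->bio;                          /* keep the original bio */
        blk_execute_rq(q, disk, rq, 0);         /* submit and wait */
        ret = rq->errors ? -EIO : 0;

        if (blk_rq_unmap_user(bio))             /* copy back / release pages */
                ret = -EFAULT;
        blk_put_request(rq);
        return ret;
}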
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct sg_iovec *iov, int iov_count, unsigned int len)
{
        struct bio *bio;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        /* we don't allow misaligned data like bio_map_user() does.  If the
         * user is using sg, they're expected to know the alignment constraints
         * and respect them accordingly */
        bio = bio_map_user_iov(q, NULL, iov, iov_count,
                               rq_data_dir(rq) == READ);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                bio_endio(bio, 0);
                bio_unmap_user(bio);
                return -EINVAL;
        }

        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
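
/*
 * Illustrative sketch of building the iovec blk_rq_map_user_iov() expects,
 * e.g. for an SG_IO request that supplies a separate header and payload
 * buffer.  struct sg_iovec comes from <scsi/sg.h>; the function name and the
 * two-segment layout are assumptions for the example.  As the comment above
 * notes, every base address and length must already satisfy the queue's DMA
 * alignment, since this path never falls back to a bounce copy, and a
 * matching blk_rq_unmap_user() on rq->bio is still required after the I/O.
 */
static int example_map_iovec(struct request_queue *q, struct request *rq,
                             void __user *hdr, size_t hdr_len,
                             void __user *payload, size_t payload_len)
{
        struct sg_iovec iov[2];

        iov[0].iov_base = hdr;
        iov[0].iov_len = hdr_len;
        iov[1].iov_base = payload;
        iov[1].iov_len = payload_len;

        /* the total length must match the sum of the iovec lengths exactly */
        return blk_rq_map_user_iov(q, rq, iov, 2, hdr_len + payload_len);
}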
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        bio = bio_map_kern(q, kbuf, len, gfp_mask);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        blk_rq_bio_prep(q, rq, bio);
        blk_queue_bounce(q, &rq->bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
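
/*
 * Illustrative sketch of a blk_rq_map_kern() caller: a driver issuing a
 * passthrough command against a kernel buffer, e.g. an INQUIRY-style probe.
 * The function name, command setup and 30 second timeout are assumptions for
 * the example.  With a kernel buffer there is no blk_rq_unmap_user() step;
 * the data simply lands in (or is taken from) kbuf.
 */
static int example_kernel_passthrough(struct request_queue *q,
                                      struct gendisk *disk,
                                      unsigned char *cmd, unsigned int cmd_len,
                                      void *kbuf, unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        memcpy(rq->cmd, cmd, cmd_len);
        rq->cmd_len = cmd_len;
        rq->timeout = 30 * HZ;

        ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
        if (!ret) {
                blk_execute_rq(q, disk, rq, 0);         /* submit and wait */
                ret = rq->errors ? -EIO : 0;
        }

        blk_put_request(rq);
        return ret;
}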