/*
 *  linux/drivers/mmc/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
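/*
 * Upper bound on the size of a bounce buffer; it is clamped further to
 * the host's max_req_size and max_seg_size in mmc_init_queue().
 */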
#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)
/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
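/*
 * Per-queue worker thread. It sleeps until mmc_request() wakes it, then
 * feeds requests from the block layer to issue_fn() until the queue is
 * empty again. thread_sem is held while requests are being issued, so
 * mmc_queue_suspend() can wait for the thread to go idle.
 */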
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing. We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			/* Nothing queued: release the semaphore so a
			   suspend can proceed, and sleep until woken. */
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);
		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	unsigned int bouncesz;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;
	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
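	/*
	 * Hosts that can only do a single segment of scatter/gather get
	 * a contiguous bounce buffer instead, so that the block layer
	 * can still merge requests up to the bounce buffer size.
	 */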
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto free_bounce_buf;
			}
			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto free_sg;
			}
		}
	}
#endif
	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}
	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 free_bounce_buf:
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
 cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
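/*
 * mmc_cleanup_queue - tear down a request queue: kill off straggling
 * requests, stop the worker thread and release the scatterlists and
 * bounce buffer.
 */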
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
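/*
 * A typical caller of the pair above is the block driver's
 * power-management code; a minimal sketch, assuming the queue is
 * embedded in some per-card "md" structure (names illustrative only):
 *
 *	mmc_queue_suspend(&md->queue);	// on suspend
 *	mmc_queue_resume(&md->queue);	// on resume
 */

/*
 * Copy the contents of one scatterlist into another, byte-for-byte.
 * The two lists are walked independently, so their entry boundaries
 * need not line up; the caller must ensure the destination is large
 * enough to hold the source.
 */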
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = page_address(dst->page) + dst->offset;
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = page_address(src->page) + src->offset;
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
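/*
 * Map the current request onto a scatterlist. When a bounce buffer is
 * in use, the request is first mapped onto the bounce scatterlist and
 * then collapsed into a single sg entry covering the bounce buffer.
 */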
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}
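/*
 * Before issuing a write through the bounce buffer, gather the
 * request's scattered pages into the contiguous buffer.
 */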
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}
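/*
 * After a read through the bounce buffer completes, scatter the data
 * back out to the request's own pages.
 */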
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}