/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
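
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * client reserves a small page pool, submits a copy of one region to a
 * single destination and is notified when the whole copy has finished.
 * 'src_bdev' and 'dst_bdev' stand for block devices the caller already
 * holds open; the 'bdev' member follows the io_region declaration in
 * dm-io.h, and the callback signature matches kcopyd_notify_fn.  Error
 * checking is omitted for brevity.
 *
 *	static void copy_done(int read_err, unsigned int write_err, void *ctx)
 *	{
 *		// non-zero read_err/write_err indicate a failed copy
 *	}
 *
 *	struct kcopyd_client *kc;
 *	struct io_region from = { .bdev = src_bdev, .sector = 0, .count = 1024 };
 *	struct io_region to   = { .bdev = dst_bdev, .sector = 0, .count = 1024 };
 *
 *	kcopyd_client_create(16, &kc);
 *	kcopyd_copy(kc, &from, 1, &to, 0, copy_done, NULL);
 *	...
 *	kcopyd_client_destroy(kc);	// waits for outstanding jobs
 */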

#include <asm/types.h>
#include <asm/atomic.h>

#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

#include "kcopyd.h"

static struct workqueue_struct *_kcopyd_wq;
static struct work_struct _kcopyd_work;

static inline void wake(void)
{
	queue_work(_kcopyd_wq, &_kcopyd_work);
}

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
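
/*
 * For reference, page_list is the simple singly-linked list of pages
 * declared in dm-io.h; roughly:
 *
 *	struct page_list {
 *		struct page_list *next;
 *		struct page *page;
 *	};
 */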

struct kcopyd_client {
	struct list_head list;

	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;
};

static struct page_list *alloc_pl(void)
{
	struct page_list *pl = kmalloc(sizeof(*pl), GFP_KERNEL);

	if (pl) {
		pl->page = alloc_page(GFP_KERNEL);
		if (!pl->page) {
			kfree(pl);
			pl = NULL;
		}
	}
	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

static int kcopyd_get_pages(struct kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;

	kc->pages = pl->next;
	pl->next = NULL;
	spin_unlock(&kc->lock);
	return 0;
}

static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;
	return 0;
}

static void client_free_pages(struct kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/

struct kcopyd_job {
	struct kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/* Error state of the job. */
	int read_err;
	unsigned int write_err;

	/* Either READ or WRITE */
	int rw;
	struct io_region source;

	/* The destinations for the transfer. */
	unsigned int num_dests;
	struct io_region dests[KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for the callback to use.
	 */
	kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct semaphore lock;
	atomic_t sub_jobs;
	sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static struct kmem_cache *_job_cache;
static mempool_t *_job_pool;

/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
static DEFINE_SPINLOCK(_job_lock);

static LIST_HEAD(_complete_jobs);
static LIST_HEAD(_io_jobs);
static LIST_HEAD(_pages_jobs);
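
/*
 * For orientation, a summary of the flow implemented below: dispatch_job()
 * queues a new job on _pages_jobs; run_pages_job() moves it to _io_jobs once
 * its pages are reserved; complete_io() requeues a finished read as a write
 * on _io_jobs and finally pushes the job onto _complete_jobs, where
 * run_complete_job() returns the pages and calls the client's notify
 * function.
 */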

static int jobs_init(void)
{
	_job_cache = kmem_cache_create("kcopyd-jobs",
				       sizeof(struct kcopyd_job),
				       __alignof__(struct kcopyd_job),
				       0, NULL, NULL);
	if (!_job_cache)
		return -ENOMEM;

	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!_job_pool) {
		kmem_cache_destroy(_job_cache);
		return -ENOMEM;
	}

	return 0;
}

static void jobs_exit(void)
{
	BUG_ON(!list_empty(&_complete_jobs));
	BUG_ON(!list_empty(&_io_jobs));
	BUG_ON(!list_empty(&_pages_jobs));

	mempool_destroy(_job_pool);
	kmem_cache_destroy(_job_cache);
	_job_pool = NULL;
	_job_cache = NULL;
}

/*
 * Functions to push a job onto the tail, and pop one off the head,
 * of a given job list.
 */
static inline struct kcopyd_job *pop(struct list_head *jobs)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);
	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&_job_lock, flags);

	return job;
}

static inline void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&_job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding job list.
 *
 * They return:
 *  < 0: error
 *    0: success
 *  > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned int write_err = job->write_err;
	kcopyd_notify_fn fn = job->fn;
	struct kcopyd_client *kc = job->kc;

	kcopyd_put_pages(kc, job->pages);
	mempool_free(job, _job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&_complete_jobs, job);
			wake();
			return;
		}
	}

	if (job->rw == WRITE)
		push(&_complete_jobs, job);
	else {
		job->rw = WRITE;
		push(&_io_jobs, job);
	}

	wake();
}

/*
 * Request io for as many pages as we can currently get for a
 * particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;

	if (job->rw == READ)
		r = dm_io_async(1, &job->source, job->rw, job->pages,
				job->offset, complete_io, job);
	else
		r = dm_io_async(job->num_dests, job->dests, job->rw, job->pages,
				job->offset, complete_io, job);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&_io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs))) {
		r = fn(job);
		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned int) -1;
			else
				job->read_err = 1;
			push(&_complete_jobs, job);
			break;
		}
		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push(jobs, job);
			break;
		}
		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *ignored)
{
	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	process_jobs(&_complete_jobs, run_complete_job);
	process_jobs(&_pages_jobs, run_pages_job);
	process_jobs(&_io_jobs, run_io_job);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
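
/*
 * A rough worked example of the split arithmetic (SUB_JOB_SIZE and
 * SPLIT_COUNT are defined below): a 2048-sector (1 MiB) copy is primed with
 * SPLIT_COUNT = 8 sub jobs of at most SUB_JOB_SIZE = 128 sectors each; every
 * time a sub job finishes, segment_complete() dispatches the next 128-sector
 * chunk, until job->progress reaches 2048 and the last outstanding sub job
 * triggers the client's callback.
 */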
static void dispatch_job(struct kcopyd_job *job)
{
	atomic_inc(&job->kc->nr_jobs);
	push(&_pages_jobs, job);
	wake();
}

#define SUB_JOB_SIZE 128
static void segment_complete(int read_err,
			     unsigned int write_err, void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	down(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	up(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);

		/* take a copy of the parent job */
		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {
		/*
		 * To avoid a race we must keep the job around
		 * until after the notify function has completed.
		 * Otherwise the client may try and stop the job
		 * after we've completed.
		 */
		job->fn(read_err, write_err, job->context);
		mempool_free(job, _job_pool);
	}
}

/*
 * Create some little jobs that between them will do the whole move.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}

int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
		unsigned int num_dests, struct io_region *dests,
		unsigned int flags, kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/* Allocate a new job. */
	job = mempool_alloc(_job_pool, GFP_NOIO);

	/* Set up for the read. */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;
	job->source = *from;
	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;
	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);
	else {
		init_MUTEX(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}

/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}

/*-----------------------------------------------------------------
 * Unit setup
 *---------------------------------------------------------------*/
static DEFINE_MUTEX(_client_lock);
static LIST_HEAD(_clients);

static void client_add(struct kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_add(&kc->list, &_clients);
	mutex_unlock(&_client_lock);
}

static void client_del(struct kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_del(&kc->list);
	mutex_unlock(&_client_lock);
}

static DEFINE_MUTEX(kcopyd_init_lock);
static int kcopyd_clients = 0;

static int kcopyd_init(void)
{
	int r;

	mutex_lock(&kcopyd_init_lock);

	if (kcopyd_clients) {
		/* Already initialized. */
		kcopyd_clients++;
		mutex_unlock(&kcopyd_init_lock);
		return 0;
	}

	r = jobs_init();
	if (r) {
		mutex_unlock(&kcopyd_init_lock);
		return r;
	}

	_kcopyd_wq = create_singlethread_workqueue("kcopyd");
	if (!_kcopyd_wq) {
		jobs_exit();
		mutex_unlock(&kcopyd_init_lock);
		return -ENOMEM;
	}

	kcopyd_clients++;
	INIT_WORK(&_kcopyd_work, do_work);
	mutex_unlock(&kcopyd_init_lock);
	return 0;
}

static void kcopyd_exit(void)
{
	mutex_lock(&kcopyd_init_lock);
	kcopyd_clients--;
	if (!kcopyd_clients) {
		jobs_exit();
		destroy_workqueue(_kcopyd_wq);
		_kcopyd_wq = NULL;
	}
	mutex_unlock(&kcopyd_init_lock);
}

int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
{
	int r;
	struct kcopyd_client *kc;

	r = kcopyd_init();
	if (r)
		return r;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc) {
		kcopyd_exit();
		return -ENOMEM;
	}

	spin_lock_init(&kc->lock);
	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r) {
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	r = dm_io_get(nr_pages);
	if (r) {
		client_free_pages(kc);
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	client_add(kc);
	*result = kc;
	return 0;
}

void kcopyd_client_destroy(struct kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	dm_io_put(kc->nr_pages);
	client_free_pages(kc);
	client_del(kc);
	kfree(kc);
	kcopyd_exit();
}

EXPORT_SYMBOL(kcopyd_client_create);
EXPORT_SYMBOL(kcopyd_client_destroy);
EXPORT_SYMBOL(kcopyd_copy);