/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: fmr_pool.c 2730 2005-06-28 16:43:03Z sean.hefty $
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

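/*
 * IB_FMR_MAX_REMAPS is a fallback: ib_create_fmr_pool() uses it only
 * when the device does not report max_map_per_fmr.  The FMR cache is
 * a hash table of IB_FMR_HASH_SIZE buckets, indexed by ib_fmr_hash()
 * on the first page of a mapping.
 */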
enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
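
/*
 * Typical consumer call sequence (a sketch; not lifted from any
 * in-tree user):
 *
 *      pool = ib_create_fmr_pool(pd, &params);
 *      ...
 *      pool_fmr = ib_fmr_pool_map_phys(pool, page_list, list_len, io_addr);
 *      ... post work requests using pool_fmr->fmr->lkey/rkey ...
 *      ib_fmr_pool_unmap(pool_fmr);
 *      ...
 *      ib_flush_fmr_pool(pool);   -- force invalidation of unmapped FMRs
 *      ib_destroy_fmr_pool(pool);
 */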

struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void *              arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};

/*
 * Pick a cache bucket for a mapping: jhash_2words() mixes both 32-bit
 * halves of the first page address, and the result is masked down to
 * a bucket index with IB_FMR_HASH_MASK.
 */
static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;
        struct hlist_node *pos;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, pos, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        /*
         * Collect the underlying ib_fmr structs on fmr_list for
         * ib_unmap_fmr(); the pool wrappers themselves are spliced
         * onto unmap_list below.
         */
        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        printk(KERN_WARNING "Unmapping FMR %p with ref count %d\n",
                               fmr, fmr->ref_count);
                }
#endif
        }

        list_splice(&pool->dirty_list, &unmap_list);
        INIT_LIST_HEAD(&pool->dirty_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (pool->dirty_len >= pool->dirty_watermark ||
                    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (pool->dirty_len < pool->dirty_watermark &&
                    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}
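
/*
 * How a forced flush propagates (a sketch of the req_ser/flush_ser
 * handshake, not code from this file):
 *
 *      1. ib_flush_fmr_pool() bumps req_ser and wakes the thread;
 *         now flush_ser - req_ser < 0.
 *      2. The thread calls ib_fmr_batch_release(), bumps flush_ser,
 *         and wakes anyone sleeping on force_wait.
 *      3. The caller's wait condition flush_ser - req_ser >= 0 is
 *         satisfied once every outstanding request has been flushed.
 *
 * Only the signed difference of the counters is compared, so the
 * scheme tolerates wraparound of the atomic_t values.
 */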

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd:Protection domain for FMRs
 * @params:FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        struct ib_device_attr *attr;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                printk(KERN_WARNING "Device %s does not support fast memory regions\n",
                       device->name);
                return ERR_PTR(-ENOSYS);
        }

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                printk(KERN_WARNING "couldn't allocate device attr struct\n");
                return ERR_PTR(-ENOMEM);
        }

        ret = ib_query_device(device, attr);
        if (ret) {
                printk(KERN_WARNING "couldn't query device\n");
                kfree(attr);
                return ERR_PTR(ret);
        }

        /* Fall back to IB_FMR_MAX_REMAPS if the device reports no limit */
        if (!attr->max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = attr->max_map_per_fmr;

        kfree(attr);

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING "couldn't allocate pool struct\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->cache_bucket   = NULL;

        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        printk(KERN_WARNING "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_create(ib_fmr_cleanup_thread,
                                      pool,
                                      "ib_fmr(%s)",
                                      device->name);
        if (IS_ERR(pool->thread)) {
                printk(KERN_WARNING "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                /* fmr_attr: renamed from "attr" to avoid shadowing the
                 * device attribute pointer above */
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
                                      GFP_KERNEL);
                        if (!fmr) {
                                printk(KERN_WARNING "failed to allocate fmr struct "
                                       "for FMR %d\n", i);
                                goto out_fail;
                        }

                        fmr->pool             = pool;
                        fmr->remap_count      = 0;
                        fmr->ref_count        = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                printk(KERN_WARNING "fmr_create failed for FMR %d\n", i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
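
/*
 * Example pool creation (a sketch; the parameter values below are
 * hypothetical and a real consumer would size the pool for its
 * workload):
 *
 *      struct ib_fmr_pool_param params = {
 *              .max_pages_per_fmr = 64,
 *              .page_shift        = PAGE_SHIFT,
 *              .access            = IB_ACCESS_LOCAL_WRITE |
 *                                   IB_ACCESS_REMOTE_WRITE,
 *              .pool_size         = 32,
 *              .dirty_watermark   = 8,
 *              .cache             = 1,
 *      };
 *      struct ib_fmr_pool *pool = ib_create_fmr_pool(pd, &params);
 *
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 */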

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool:FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int                 i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                printk(KERN_WARNING "pool still has %d regions registered\n",
                       pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool:FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        atomic_inc(&pool->req_ser);
        /*
         * It's OK if someone else bumps req_ser again here -- we'll
         * just wait a little longer.
         */
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) -
                                     atomic_read(&pool->req_ser) >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle:FMR pool to allocate FMR from
 * @page_list:List of pages to map
 * @list_len:Number of pages in @page_list
 * @io_virtual_address:I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1)
                        list_del(&fmr->list);

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                printk(KERN_WARNING "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
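
/*
 * Example map/unmap cycle (a sketch, assuming page_list[] holds
 * list_len page-aligned DMA addresses):
 *
 *      struct ib_pool_fmr *pfmr;
 *
 *      pfmr = ib_fmr_pool_map_phys(pool, page_list, list_len, io_addr);
 *      if (IS_ERR(pfmr))
 *              return PTR_ERR(pfmr);   -- -EAGAIN means the pool ran dry
 *
 *      ... use pfmr->fmr->lkey / pfmr->fmr->rkey in work requests ...
 *
 *      ib_fmr_pool_unmap(pfmr);
 */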

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr:FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        ++pool->dirty_len;
                        wake_up_process(pool->thread);
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                printk(KERN_WARNING "FMR %p has ref count %d < 0\n",
                       fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);