/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);
	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}
	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1) & (bitmap->max - 1);
		obj |= bitmap->top;
	} else
		obj = -1;
	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
	obj &= bitmap->max - 1;

	spin_lock(&bitmap->lock);
	clear_bit(obj, bitmap->table);
	bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
	spin_unlock(&bitmap->lock);
}

int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
{
	int i;

	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num;
	bitmap->mask = mask;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < reserved; ++i)
		set_bit(i, bitmap->table);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}

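/*
 * Illustrative sketch only (not part of the driver): how a caller
 * inside mlx4_core might pair the bitmap helpers above.  The table
 * size, mask and reserved count are made-up example values.
 *
 *	struct mlx4_bitmap bitmap;
 *	u32 obj;
 *	int err;
 *
 *	err = mlx4_bitmap_init(&bitmap, 256, 255, 8);	// num must be a power of 2
 *	if (err)
 *		return err;
 *	obj = mlx4_bitmap_alloc(&bitmap);	// returns -1 when the table is full
 *	if (obj != -1)
 *		mlx4_bitmap_free(&bitmap, obj);
 *	mlx4_bitmap_cleanup(&bitmap);
 */
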
/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs      = 1;
		buf->npages     = 1;
		buf->page_shift = get_order(size) + PAGE_SHIFT;
		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		/* Shrink page_shift until the DMA address is aligned to it. */
		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->nbufs      = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages     = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list  = kzalloc(buf->nbufs * sizeof *buf->page_list,
					  GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		/* On 64-bit, map the pages into one virtually contiguous buffer. */
		if (BITS_PER_LONG == 64) {
			struct page **pages;

			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

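/*
 * Illustrative sketch only (not part of the driver): a typical
 * mlx4_buf_alloc()/mlx4_buf_free() pairing for the strategy described
 * above mlx4_buf_alloc().  The 16 KB size and the one-page max_direct
 * threshold are made-up example values; above the threshold the
 * allocation uses the page-list path.
 *
 *	struct mlx4_buf buf;
 *	int err;
 *
 *	err = mlx4_buf_alloc(dev, 16384, PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *	// ... write the buffer's pages into an MTT and hand it to the HCA ...
 *	mlx4_buf_free(dev, 16384, &buf);
 */
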
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o, i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}
	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);
	i <<= o;

	/* If we split a higher-order slot, mark its buddy free at the requested order. */
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o, i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	/* Coalesce with the buddy record if it is also free. */
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	/* Free the whole page once every doorbell record in it is free. */
	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

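/*
 * Illustrative sketch only (not part of the driver): doorbell records
 * are carved out of shared DMA pages by the pgdir code above, so a
 * caller just pairs mlx4_db_alloc() and mlx4_db_free().  The order
 * argument selects a single 4-byte record (0) or an aligned pair (1);
 * the usage below is a made-up example.
 *
 *	struct mlx4_db db;
 *	int err;
 *
 *	err = mlx4_db_alloc(dev, &db, 0);
 *	if (err)
 *		return err;
 *	*db.db = 0;		// clear the record before giving db.dma to hardware
 *	// ... hardware reads the doorbell record at db.dma ...
 *	mlx4_db_free(dev, &db);
 */
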
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

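/*
 * Illustrative sketch only (not part of the driver): mlx4_alloc_hwq_res()
 * bundles the doorbell, buffer and MTT steps above for a hardware work
 * queue; the size and max_direct values are made-up examples.
 *
 *	struct mlx4_hwq_resources wqres;
 *	int err;
 *
 *	err = mlx4_alloc_hwq_res(dev, &wqres, 32768, 2 * PAGE_SIZE);
 *	if (err)
 *		return err;
 *	// ... create the queue using wqres.buf, wqres.mtt and wqres.db ...
 *	mlx4_free_hwq_res(dev, &wqres, 32768);
 */
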
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);