/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

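/*
 * For reference (assuming 4 KB pages): 1 << 18 is 256 KB, so
 * get_order(MTHCA_ICM_ALLOC_SIZE) == 6 and mthca_alloc_icm() below
 * starts by requesting 64-page blocks, falling back to smaller orders
 * when an allocation fails.
 */
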
struct mthca_user_db_table {
	struct mutex mutex;
	struct {
		u64                uvirt;
		struct scatterlist mem;
		int                refcount;
	}                page[0];
};

static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(chunk->mem[i].page,
			     get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(chunk->mem[i].page),
				  sg_dma_address(&chunk->mem[i]));
	}
}

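/*
 * Free an ICM backed by either allocation scheme: walk the chunk list
 * and tear each chunk down with the coherent or streaming-DMA path
 * that matches how it was allocated.
 */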
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	mem->page = alloc_pages(gfp_mask, order);
	if (!mem->page)
		return -ENOMEM;

	mem->length = PAGE_SIZE << order;
	mem->offset = 0;
	return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				    int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
				       gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				++chunk->nsg;
			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}

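/*
 * Context tables are populated one MTHCA_TABLE_CHUNK_SIZE piece at a
 * time, on demand. Worked example: with 64-byte objects, one 256 KB
 * chunk covers 4096 objects, so object 5000 maps to chunk
 * i == 5000 * 64 / MTHCA_TABLE_CHUNK_SIZE == 1.
 */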
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;
	u8 status;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status) {
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
				&status);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

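/*
 * Translate an object index into a kernel virtual address (lowmem
 * tables only). The DMA offset is tracked separately from the page
 * offset because the IOMMU may have merged scatterlist entries, so
 * DMA segment lengths need not match page lengths.
 */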
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned.
			 */
			if (chunk->mem[i].length > offset) {
				page = chunk->mem[i].page;
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem, int use_coherent)
{
	struct mthca_icm_table *table;
	int num_icm;
	unsigned chunk_size;
	int i;
	u8 status;

	num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;

	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MTHCA_TABLE_CHUNK_SIZE;
		if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

		table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
				  &status) || status) {
			mthca_free_icm(dev, table->icm[i], table->coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return table;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);

	return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
	int i;
	u8 status;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);
}

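/*
 * UAR context layout: each UAR owns uarc_size bytes of ICM, split into
 * MTHCA_ICM_PAGE_SIZE (4 KB) doorbell pages. For example, with a
 * 32 KB uarc_size, page 2 of UAR index 3 sits at
 * uarc_base + 3 * 32768 + 2 * 4096.
 */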
static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
	return dev->uar_table.uarc_base +
		uar->index * dev->uar_table.uarc_size +
		page * MTHCA_ICM_PAGE_SIZE;
}

int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
	int ret = 0;
	u8 status;
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	if (index < 0 || index > dev->uar_table.uarc_size / 8)
		return -EINVAL;

	mutex_lock(&db_tab->mutex);

	i = index / MTHCA_DB_REC_PER_PAGE;

	if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) ||
	    (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
	    (uaddr & 4095)) {
		ret = -EINVAL;
		goto out;
	}

	if (db_tab->page[i].refcount) {
		++db_tab->page[i].refcount;
		goto out;
	}

	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
			     &db_tab->page[i].mem.page, NULL);
	if (ret < 0)
		goto out;

	db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
	db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;

	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
	if (ret < 0) {
		put_page(db_tab->page[i].mem.page);
		goto out;
	}

	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
				 mthca_uarc_virt(dev, uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
		put_page(db_tab->page[i].mem.page);
		goto out;
	}

	db_tab->page[i].uvirt    = uaddr;
	db_tab->page[i].refcount = 1;

out:
	mutex_unlock(&db_tab->mutex);

	return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
			 struct mthca_user_db_table *db_tab, int index)
{
	if (!mthca_is_memfree(dev))
		return;

	/*
	 * To make our bookkeeping simpler, we don't unmap DB
	 * pages until we clean up the whole db table.
	 */

	mutex_lock(&db_tab->mutex);

	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

	mutex_unlock(&db_tab->mutex);
}

struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
	struct mthca_user_db_table *db_tab;
	int npages;
	int i;

	if (!mthca_is_memfree(dev))
		return NULL;

	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
	if (!db_tab)
		return ERR_PTR(-ENOMEM);

	mutex_init(&db_tab->mutex);
	for (i = 0; i < npages; ++i) {
		db_tab->page[i].refcount = 0;
		db_tab->page[i].uvirt    = 0;
	}

	return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
			       struct mthca_user_db_table *db_tab)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
		if (db_tab->page[i].uvirt) {
			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
			put_page(db_tab->page[i].mem.page);
		}
	}

	kfree(db_tab);
}

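/*
 * Kernel doorbell records come from two ends of the UARC: group 1
 * (CQ arm and SQ doorbells) grows up from page 0, group 2 (CQ set_ci,
 * RQ and SRQ doorbells) grows down from the last page, and record
 * indices within a group-2 page are mirrored. The table is full when
 * the two groups meet.
 */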
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
		   u32 qn, __be32 **db)
{
	int group;
	int start, end, dir;
	int i, j;
	struct mthca_db_page *page;
	int ret = 0;
	u8 status;

	mutex_lock(&dev->db_tab->mutex);

	switch (type) {
	case MTHCA_DB_TYPE_CQ_ARM:
	case MTHCA_DB_TYPE_SQ:
		group = 0;
		start = 0;
		end   = dev->db_tab->max_group1;
		dir   = 1;
		break;

	case MTHCA_DB_TYPE_CQ_SET_CI:
	case MTHCA_DB_TYPE_RQ:
	case MTHCA_DB_TYPE_SRQ:
		group = 1;
		start = dev->db_tab->npages - 1;
		end   = dev->db_tab->min_group2;
		dir   = -1;
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	for (i = start; i != end; i += dir)
		if (dev->db_tab->page[i].db_rec &&
		    !bitmap_full(dev->db_tab->page[i].used,
				 MTHCA_DB_REC_PER_PAGE)) {
			page = dev->db_tab->page + i;
			goto found;
		}

	for (i = start; i != end; i += dir)
		if (!dev->db_tab->page[i].db_rec) {
			page = dev->db_tab->page + i;
			goto alloc;
		}

	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
		ret = -ENOMEM;
		goto out;
	}

	if (group == 0)
		++dev->db_tab->max_group1;
	else
		--dev->db_tab->min_group2;

	page = dev->db_tab->page + end;

alloc:
	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
					  &page->mapping, GFP_KERNEL);
	if (!page->db_rec) {
		ret = -ENOMEM;
		goto out;
	}
	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

	ret = mthca_MAP_ICM_page(dev, page->mapping,
				 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		goto out;
	}

	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
	set_bit(j, page->used);

	if (group == 1)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;

	ret = i * MTHCA_DB_REC_PER_PAGE + j;
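
	/*
	 * Encode the record: queue number in bits [31:8] and doorbell
	 * type in bits [7:5], byte-swapped into the big-endian format
	 * the HCA reads.
	 */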
	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

	*db = (__be32 *) &page->db_rec[j];

out:
	mutex_unlock(&dev->db_tab->mutex);

	return ret;
}

void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
	int i, j;
	struct mthca_db_page *page;
	u8 status;

	i = db_index / MTHCA_DB_REC_PER_PAGE;
	j = db_index % MTHCA_DB_REC_PER_PAGE;

	page = dev->db_tab->page + i;

	mutex_lock(&dev->db_tab->mutex);

	page->db_rec[j] = 0;
	if (i >= dev->db_tab->min_group2)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;
	clear_bit(j, page->used);

	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
	    i >= dev->db_tab->max_group1 - 1) {
		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		page->db_rec = NULL;

		if (i == dev->db_tab->max_group1) {
			--dev->db_tab->max_group1;
			/* XXX may be able to unmap more pages now */
		}
		if (i == dev->db_tab->min_group2)
			++dev->db_tab->min_group2;
	}

	mutex_unlock(&dev->db_tab->mutex);
}

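/*
 * Note that only the topmost group-1 page (or a group-2 page) is
 * unmapped when it empties; see the XXX above. Anything left mapped
 * is swept up later by mthca_cleanup_db_tab().
 */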
int mthca_init_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
	if (!dev->db_tab)
		return -ENOMEM;

	mutex_init(&dev->db_tab->mutex);

	dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	dev->db_tab->max_group1 = 0;
	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

	dev->db_tab->page = kmalloc(dev->db_tab->npages *
				    sizeof *dev->db_tab->page,
				    GFP_KERNEL);
	if (!dev->db_tab->page) {
		kfree(dev->db_tab);
		return -ENOMEM;
	}

	for (i = 0; i < dev->db_tab->npages; ++i)
		dev->db_tab->page[i].db_rec = NULL;

	return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	/*
	 * Because we don't always free our UARC pages when they become
	 * empty (to keep mthca_free_db() simple), we need to sweep
	 * through the doorbell pages and free any leftover pages now.
	 */
	for (i = 0; i < dev->db_tab->npages; ++i) {
		if (!dev->db_tab->page[i].db_rec)
			continue;

		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  dev->db_tab->page[i].db_rec,
				  dev->db_tab->page[i].mapping);
	}

	kfree(dev->db_tab->page);
	kfree(dev->db_tab);
}