/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"
static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
			   loff_t *);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);

static struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap
};
static int ipath_get_base_info(struct ipath_portdata *pd,
			       void __user *ubase, size_t ubase_size)
{
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;

	if (ubase_size < sizeof(*kinfo)) {
		ipath_cdbg(PROC,
			   "Base size %lu, need %lu (version mismatch?)\n",
			   (unsigned long) ubase_size,
			   (unsigned long) sizeof(*kinfo));
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
	kinfo->spi_piobufbase = (u64) pd->port_piobufs;
	kinfo->__spi_uregbase =
		dd->ipath_uregbase + dd->ipath_palign * pd->port_port;

	kinfo->spi_pioindex = dd->ipath_pbufsport * (pd->port_port - 1);
	kinfo->spi_piocnt = dd->ipath_pbufsport;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	kinfo->spi_piosize = dd->ipath_ibmaxlen;
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}
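
/*
 * Illustrative userspace counterpart (not part of the driver): each spi_*
 * address above is a physical address that user code hands back as an mmap
 * offset, which ipath_mmap() below matches against the per-port resources.
 * The helper name, the open /dev/ipath fd, a 64-bit off_t, and the
 * <sys/mman.h>/<stdint.h> userspace environment are all assumptions.
 */
#if 0	/* example only, never built */
static volatile uint32_t *example_map_rcvhdrq(int fd,
				const struct ipath_base_info *binfo)
{
	/* the driver rounds this up to a whole number of pages */
	size_t len = binfo->spi_rcvhdr_cnt * binfo->spi_rcvhdrent_size *
		sizeof(uint32_t);
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, (off_t) binfo->spi_rcvhdr_base);

	return p == MAP_FAILED ? NULL : (volatile uint32_t *) p;
}
#endif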
/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success?  likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	tidcnt = dd->ipath_rcvtidcnt;
	if (cnt >= tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = (struct page **)pd->port_tid_pg_list;
	tidlist = (u16 *) (&pagep[cnt]);

	memset(tidmap, 0, sizeof(tidmap));
	tid = pd->port_tidcursor;
	/* before decrement; chip actual # */
	porttid = pd->port_port * tidcnt;
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			goto cleanup;
		}
		tidlist[i] = tid;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = page_to_phys(pagep[i]);
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
						    dd->ipath_tidinvalid);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail".  If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		pd->port_tidcursor = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof(tidmap))) {
		ret = -EFAULT;
		goto done;
	}

	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	tidcnt = dd->ipath_rcvtidcnt;
	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pd->port_pid, tid);
			dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
					    dd->ipath_tidinvalid);
			ipath_release_user_pages(
				&dd->ipath_pageshadow[porttid + tid], 1);
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
	struct ipath_devdata *dd = pd->port_dd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;
	int ret;

	if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
		   pd->port_port, key, dd->ipath_pkeys[0],
		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
		   atomic_read(&dd->ipath_pkeyrefs[3]));

	if (!lkey) {
		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
			   pd->port_port);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it. (see bug 4331)
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i] && pidx == -1)
			pidx = i;
		if (pd->port_pkeys[i] == key) {
			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
				   "(%x) more than once\n",
				   pd->port_port, key);
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ipath_dbg("All pkeys for port %u already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		if (dd->ipath_pkeys[i] == key) {
			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				pd->port_pkeys[pidx] = key;
				ipath_cdbg(VERBOSE, "p%u set key %x "
					   "matches #%d, count now %d\n",
					   pd->port_port, key, i,
					   atomic_read(pkrefs));
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				ipath_cdbg(VERBOSE, "Lost race, count was "
					   "0, after dec, it's %d\n",
					   atomic_read(pkrefs));
			}
		}
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ipath_dbg("port %u, all pkeys already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			u64 pkey;

			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
			pkey =
				(u64) dd->ipath_pkeys[0] |
				((u64) dd->ipath_pkeys[1] << 16) |
				((u64) dd->ipath_pkeys[2] << 32) |
				((u64) dd->ipath_pkeys[3] << 48);
			ipath_cdbg(PROC, "p%u set key %x in #%d, "
				   "portidx %d, new pkey reg %llx\n",
				   pd->port_port, key, i, pidx,
				   (unsigned long long) pkey);
			ipath_write_kreg(
				dd, dd->ipath_kregs->kr_partitionkey, pkey);

			ret = 0;
			goto bail;
		}
	}
	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
		  "can't set %x\n", pd->port_port, key);
	ret = -EBUSY;

bail:
	return ret;
}
/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop)
{
	struct ipath_devdata *dd = pd->port_dd;
	u64 tval;

	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u\n",
		   start_stop ? "en" : "dis", dd->ipath_unit,
		   pd->port_port);
	/* atomically clear receive enable port. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.  This could cause a
		 * problem if software was broken, and did the enable w/o
		 * the disable, but eventually the in-memory copy will be
		 * updated and correct itself, even in the face of software
		 * bugs.
		 */
		*pd->port_rcvhdrtail_kvaddr = 0;
		set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
			&dd->ipath_rcvctrl);
	} else
		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
			  &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* now be sure chip saw it before we return */
	tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	if (start_stop) {
		/*
		 * And try to be sure that tail reg update has happened too.
		 * This should in theory interlock with the RXE changes to
		 * the tail register.  Don't assign it to the tail register
		 * in memory copy, since we could overwrite an update by the
		 * chip if we did.
		 */
		tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	}
	/* always; new head should be equal to new tail; see above */
	return 0;
}
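
/*
 * Hypothetical userspace use of the above (the helper name is ours): after
 * draining a queue-overflow condition, disable and then re-enable receive
 * so the in-memory tail copy is re-zeroed as described in the comment
 * above.
 */
#if 0	/* example only, never built */
static int example_recv_ctrl(int fd, int start_stop)
{
	struct ipath_cmd c = { .type = IPATH_CMD_RECV_CTRL };

	c.cmd.recv_ctrl = start_stop;	/* 0 == disable, 1 == re-enable */
	return write(fd, &c, sizeof(c)) > 0 ? 0 : -1;
}
#endif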
static void ipath_clean_part_key(struct ipath_portdata *pd,
				 struct ipath_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;

	/* for debugging only */
	oldpkey = (u64) dd->ipath_pkeys[0] |
		((u64) dd->ipath_pkeys[1] << 16) |
		((u64) dd->ipath_pkeys[2] << 32) |
		((u64) dd->ipath_pkeys[3] << 48);

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i])
			continue;
		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
			   pd->port_pkeys[i]);
		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
			/* check for match independent of the global bit */
			if ((dd->ipath_pkeys[j] & 0x7fff) !=
			    (pd->port_pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
				ipath_cdbg(VERBOSE, "p%u clear key "
					   "%x matches #%d\n",
					   pd->port_port,
					   pd->port_pkeys[i], j);
				ipath_stats.sps_pkeys[j] =
					dd->ipath_pkeys[j] = 0;
				pchanged++;
			} else
				ipath_cdbg(
					VERBOSE, "p%u key %x matches #%d, "
					"but ref still %d\n", pd->port_port,
					pd->port_pkeys[i], j,
					atomic_read(&dd->ipath_pkeyrefs[j]));
			break;
		}
		pd->port_pkeys[i] = 0;
	}
	if (pchanged) {
		u64 pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
			   "new pkey reg %llx\n", pd->port_port,
			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
}
/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	int ret;

	egrcnt = dd->ipath_rcvegrcnt;
	/* TID number offset for this port */
	egroff = pd->port_port * egrcnt;
	egrsize = dd->ipath_rcvegrbufsize;
	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

	/*
	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
	 * physically contiguous memory, advance through it until used up
	 * and then allocate more.  Of course, we need memory to store those
	 * extra pointers, now.  Started out with 256KB, but under heavy
	 * memory pressure (creating large files and then copying them over
	 * NFS while doing lots of MPI jobs), we hit some allocation
	 * failures, even though we can sleep...  (2.6.10) Still get
	 * failures at 64K.  32K is the lowest we can go without waiting
	 * more memory again.  It seems likely that the coalescing in
	 * free_pages, etc. still has issues (as it has had previously
	 * during 2.6.x development).
	 */
	size = 0x8000;
	alloced = ALIGN(egrsize * egrcnt, size);
	egrperchunk = size / egrsize;
	chunk = (egrcnt + egrperchunk - 1) / egrperchunk;
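	/*
	 * e.g. (illustrative numbers only, not from the hardware): 2KB
	 * eager buffers in 32KB chunks give egrperchunk = 16, so egrcnt =
	 * 60 rounds up, via the ceiling division above, to chunk = 4.
	 */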
733 pd->port_rcvegrbuf_chunks = chunk;
734 pd->port_rcvegrbufs_perchunk = egrperchunk;
735 pd->port_rcvegrbuf_size = size;
736 pd->port_rcvegrbuf = vmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]));
737 if (!pd->port_rcvegrbuf) {
741 pd->port_rcvegrbuf_phys =
742 vmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]));
743 if (!pd->port_rcvegrbuf_phys) {
747 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
749 * GFP_USER, but without GFP_FS, so buffer cache can be
750 * coalesced (we hope); otherwise, even at order 4,
751 * heavy filesystem activity makes these fail
753 gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
755 pd->port_rcvegrbuf[e] = dma_alloc_coherent(
756 &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
759 if (!pd->port_rcvegrbuf[e]) {
761 goto bail_rcvegrbuf_phys;
765 pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];
767 for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
768 dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
771 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
772 dd->ipath_f_put_tid(dd, e + egroff +
776 dd->ipath_rcvegrbase), 0, pa);
779 cond_resched(); /* don't hog the cpu */
786 for (e = 0; e < pd->port_rcvegrbuf_chunks &&
787 pd->port_rcvegrbuf[e]; e++)
788 dma_free_coherent(&dd->pcidev->dev, size,
789 pd->port_rcvegrbuf[e],
790 pd->port_rcvegrbuf_phys[e]);
792 vfree(pd->port_rcvegrbuf_phys);
793 pd->port_rcvegrbuf_phys = NULL;
795 vfree(pd->port_rcvegrbuf);
796 pd->port_rcvegrbuf = NULL;
static int ipath_do_user_init(struct ipath_portdata *pd,
			      const struct ipath_user_info *uinfo)
{
	int ret = 0;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr, uaddr, off, atmp;
	struct page *pagep;
	u32 head32;
	u64 head;

	/* for now, if major version is different, bail */
	if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
		dev_info(&dd->pcidev->dev,
			 "User major version %d not same as driver "
			 "major %d\n", uinfo->spu_userversion >> 16,
			 IPATH_USER_SWMAJOR);
		ret = -ENODEV;
		goto done;
	}

	if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
		ipath_dbg("User minor version %d not same as driver "
			  "minor %d\n", uinfo->spu_userversion & 0xffff,
			  IPATH_USER_SWMINOR);

	if (uinfo->spu_rcvhdrsize) {
		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
		if (ret)
			goto done;
	}

	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */

	/* set up for the rcvhdr Q tail register writeback to user memory */
	if (!uinfo->spu_rcvhdraddr ||
	    !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
		       uinfo->spu_rcvhdraddr, sizeof(u64))) {
		ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
			  pd->port_port,
			  (unsigned long long) uinfo->spu_rcvhdraddr);
		ret = -EINVAL;
		goto done;
	}

	off = offset_in_page(uinfo->spu_rcvhdraddr);
	uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
	ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
	if (ret) {
		dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
			 "address %llx for rcvhdrtail: errno %d\n",
			 (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
		goto done;
	}
	ipath_stats.sps_pagelocks++;
	pd->port_rcvhdrtail_uaddr = uaddr;
	pd->port_rcvhdrtail_pagep = pagep;
	pd->port_rcvhdrtail_kvaddr =
		page_address(pagep);
	pd->port_rcvhdrtail_kvaddr += off;
	physaddr = page_to_phys(pagep) + off;
	ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
		   "physical (off=%llx)\n",
		   pd->port_port,
		   (unsigned long long) uinfo->spu_rcvhdraddr,
		   (unsigned long long) physaddr, (unsigned long long) off);
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			      pd->port_port, physaddr);
	atmp = ipath_read_kreg64_port(dd,
				      dd->ipath_kregs->kr_rcvhdrtailaddr,
				      pd->port_port);
	if (physaddr != atmp) {
		ipath_dev_err(dd,
			      "Catastrophic software error, "
			      "RcvHdrTailAddr%u written as %llx, "
			      "read back as %llx\n", pd->port_port,
			      (unsigned long long) physaddr,
			      (unsigned long long) atmp);
		ret = -EINVAL;
		goto done;
	}

	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
	pd->port_piobufs = dd->ipath_piobufbase +
		dd->ipath_pbufsport * (pd->port_port -
				       1) * dd->ipath_palign;
	ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
		   pd->port_port, pd->port_piobufs);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If pd->port_port > chip-supported,
	 * we need to do extra stuff here to handle it, by handling
	 * overflow through port 0, someday
	 */
	ret = ipath_create_rcvhdrq(dd, pd);
	if (!ret)
		ret = ipath_create_user_egr(pd);
	if (ret)
		goto done;
	/* enable receives now */
	/* atomically set enable bit for this port */
	set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
		&dd->ipath_rcvctrl);

	/*
	 * set the head registers for this port to the current values
	 * of the tail pointers, since we don't know if they were
	 * updated on last use of the port.
	 */
	head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	head = head32;
	ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
	dd->ipath_lastegrheads[pd->port_port] = -1;
	dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
	ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from "
		   "tail regs\n", pd->port_port,
		   (unsigned long long) head, head32);
	pd->port_tidcursor = 0;	/* start at beginning after open */
	/*
	 * now enable the port; the tail registers will be written to memory
	 * by the chip as soon as it sees the write to
	 * dd->ipath_kregs->kr_rcvctrl.  The update only happens on
	 * transition from 0 to 1, so clear it first, then set it as part of
	 * enabling the port.  This will (very briefly) affect any other
	 * open ports, but it shouldn't be long enough to be an issue.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

done:
	return ret;
}
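
/*
 * Hypothetical userspace half of the handshake above (the spu_* field
 * names are the ones this function reads; the helper name, the static
 * tail variable, and the userspace environment are assumptions): stamp
 * the version, point the driver at a u64 for the rcvhdr tail writeback,
 * and receive the ipath_base_info used for the mmaps that follow.
 */
#if 0	/* example only, never built */
static int example_user_init(int fd, struct ipath_base_info *binfo)
{
	static uint64_t rcvhdrtail;	/* driver writes the queue tail here */
	struct ipath_cmd c = { .type = IPATH_CMD_USER_INIT };

	c.cmd.user_info.spu_userversion =
		(IPATH_USER_SWMAJOR << 16) | IPATH_USER_SWMINOR;
	c.cmd.user_info.spu_rcvhdraddr = (uintptr_t) &rcvhdrtail;
	c.cmd.user_info.spu_base_info = (uintptr_t) binfo;
	c.cmd.user_info.spu_base_info_size = sizeof(*binfo);
	return write(fd, &c, sizeof(c)) > 0 ? 0 : -1;
}
#endif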
static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	int ret;

	/* it's the real hardware, so io_remap works */

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->ipath_physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}
static int mmap_piobufs(struct vm_area_struct *vma,
			struct ipath_devdata *dd,
			struct ipath_portdata *pd)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers, we want to map them as writeonly, no
	 * read possible.
	 */
	if ((vma->vm_end - vma->vm_start) >
	    (dd->ipath_pbufsport * dd->ipath_palign)) {
		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
			 "reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EFAULT;
		goto bail;
	}

	phys = dd->ipath_physaddr + pd->port_piobufs;
	/*
	 * Do *NOT* mark this as non-cached (PWT bit), or we don't get the
	 * write combining behavior we want on the PIO buffers!
	 * vma->vm_page_prot =
	 * pgprot_noncached(vma->vm_page_prot);
	 */

	if (vma->vm_flags & VM_READ) {
		dev_info(&dd->pcidev->dev,
			 "Can't map piobufs as readable (flags=%lx)\n",
			 vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	/* don't allow them to later change to readable with mprotect */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}
static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long start, size;
	size_t total_size, i;
	dma_addr_t *phys;
	int ret;

	if (!pd->port_rcvegrbuf) {
		ret = -EFAULT;
		goto bail;
	}

	size = pd->port_rcvegrbuf_size;
	total_size = pd->port_rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EFAULT;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	start = vma->vm_start;
	phys = pd->port_rcvegrbuf_phys;

	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
		ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
				      size, vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}
static int mmap_rcvhdrq(struct vm_area_struct *vma,
			struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	size_t total_size;
	int ret;

	/*
	 * kmalloc'ed memory, physically contiguous; this is from
	 * spi_rcvhdr_base; we allow user to map read-write so they can
	 * write hdrq entries to allow protocol code to directly poll
	 * whether a hdrq entry has been written.
	 */
	total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
			   sizeof(u32), PAGE_SIZE);
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev,
			 "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EFAULT;
		goto bail;
	}

	ret = remap_pfn_range(vma, vma->vm_start,
			      pd->port_rcvhdrq_phys >> PAGE_SHIFT,
			      vma->vm_end - vma->vm_start,
			      vma->vm_page_prot);
bail:
	return ret;
}
static int mmap_pioavailregs(struct vm_area_struct *vma,
			     struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int ret;

	/*
	 * when we map the PIO bufferavail registers, we want to map them as
	 * readonly, no write possible.
	 *
	 * kmalloc'ed memory, physically contiguous, one page only, readonly
	 */
	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) PAGE_SIZE);
		ret = -EFAULT;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev,
			 "Can't map pioavailregs as writable (flags=%lx)\n",
			 vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	/* don't allow them to later change with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	ret = remap_pfn_range(vma, vma->vm_start,
			      dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
			      PAGE_SIZE, vma->vm_page_prot);
bail:
	return ret;
}
/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u64 pgaddr, ureg;
	int ret;

	pd = fp->private_data;
	dd = pd->port_dd;

	/*
	 * This is the ipath_do_user_init() code, mapping the shared buffers
	 * into the user process.  The address referred to by vm_pgoff is the
	 * virtual, not physical, address; we only do one mmap for each
	 * space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * note that ureg does *NOT* have the kregvirt as part of it, to be
	 * sure that for 32 bit programs, we don't end up trying to map a >
	 * 44 address.  Has to match ipath_get_base_info() code that sets
	 * __spi_uregbase
	 */
	ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;

	ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
		   (unsigned long long) pgaddr, vma->vm_start,
		   vma->vm_end - vma->vm_start);

	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == pd->port_piobufs)
		ret = mmap_piobufs(vma, dd, pd);
	else if (pgaddr == (u64) pd->port_rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, pd);
	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
		ret = mmap_rcvhdrq(vma, pd);
	else if (pgaddr == dd->ipath_pioavailregs_phys)
		ret = mmap_pioavailregs(vma, pd);
	else
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		dev_info(&dd->pcidev->dev,
			 "Failure %d on addr %lx, off %lx\n",
			 -ret, vma->vm_start, vma->vm_pgoff);

	return ret;
}
static unsigned int ipath_poll(struct file *fp,
			       struct poll_table_struct *pt)
{
	struct ipath_portdata *pd;
	u32 head, tail;
	int bit;
	struct ipath_devdata *dd;
	unsigned pollflag = 0;

	pd = fp->private_data;
	dd = pd->port_dd;

	bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
	set_bit(bit, &dd->ipath_rcvctrl);

	/*
	 * Before blocking, make sure that head is still == tail,
	 * reading from the chip, so we can be sure the interrupt
	 * enable has made it to the chip.  If not equal, disable
	 * interrupt again and return immediately.  This avoids races,
	 * and the overhead of the chip read doesn't matter much at
	 * this point, since we are waiting for something anyway.
	 */

	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
	tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

	if (tail == head) {
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
			(void)ipath_write_ureg(dd, ur_rcvhdrhead,
					       dd->ipath_rhdrhead_intr_off
					       | head, pd->port_port);
		poll_wait(fp, &pd->port_wait, pt);

		if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
			/* timed out, no packets received */
			clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
			pd->port_rcvwait_to++;
		}
	} else {
		/* it's already happened; don't do wait_event overhead */
		pollflag = POLLIN | POLLRDNORM;
		pd->port_rcvnowait++;
	}

	clear_bit(bit, &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	return pollflag;
}
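
/*
 * Hypothetical userspace side of the poll support above (assumes <poll.h>;
 * the helper name is ours): block until the driver reports POLLIN, i.e.
 * until the chip advances the rcvhdr tail past our head.
 */
#if 0	/* example only, never built */
static int example_wait_for_packet(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms) == 1 &&
		(pfd.revents & POLLIN);	/* nonzero: headers to process */
}
#endif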
static int try_alloc_port(struct ipath_devdata *dd, int port,
			  struct file *fp)
{
	int ret;

	if (!dd->ipath_pd[port]) {
		void *p, *ptmp;

		p = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

		/*
		 * Allocate memory for use in ipath_tid_update() just once
		 * at open, not per call.  Reduces cost of expected send
		 * setup.
		 */
		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
			       dd->ipath_rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);
		if (!p || !ptmp) {
			ipath_dev_err(dd, "Unable to allocate portdata "
				      "memory, failing open\n");
			ret = -ENOMEM;
			kfree(p);
			kfree(ptmp);
			goto bail;
		}
		dd->ipath_pd[port] = p;
		dd->ipath_pd[port]->port_port = port;
		dd->ipath_pd[port]->port_dd = dd;
		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
	}
	if (!dd->ipath_pd[port]->port_cnt) {
		dd->ipath_pd[port]->port_cnt = 1;
		fp->private_data = (void *) dd->ipath_pd[port];
		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
			   current->comm, current->pid, dd->ipath_unit,
			   port);
		dd->ipath_pd[port]->port_pid = current->pid;
		strncpy(dd->ipath_pd[port]->port_comm, current->comm,
			sizeof(dd->ipath_pd[port]->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
		goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}
static inline int usable(struct ipath_devdata *dd)
{
	return dd &&
		(dd->ipath_flags & IPATH_PRESENT) &&
		dd->ipath_kregbase &&
		dd->ipath_lid &&
		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
				     | IPATH_LINKUNK));
}
static int find_free_port(int unit, struct file *fp)
{
	struct ipath_devdata *dd = ipath_lookup(unit);
	int ret, i;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (!usable(dd)) {
		ret = -ENETDOWN;
		goto bail;
	}

	for (i = 0; i < dd->ipath_cfgports; i++) {
		ret = try_alloc_port(dd, i, fp);
		if (ret != -EBUSY)
			goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}
static int find_best_unit(struct file *fp)
{
	int ret = 0, i, prefunit = -1, devmax;
	int maxofallports, npresent, nup;
	int ndev;

	(void) ipath_count_units(&npresent, &nup, &maxofallports);

	/*
	 * This code is present to allow a knowledgeable person to
	 * specify the layout of processes to processors before opening
	 * this driver, and then we'll assign the process to the "closest"
	 * HT-400 to that processor (we assume reasonable connectivity,
	 * for now).  This code assumes that if affinity has been set
	 * before this point, that at most one cpu is set; for now this
	 * is reasonable.  I check for both cpus_empty() and cpus_full(),
	 * in case some kernel variant sets none of the bits when no
	 * affinity is set.  2.6.11 and 12 kernels have all present
	 * cpus set.  Some day we'll have to fix it up further to handle
	 * a cpu subset.  This algorithm fails for two HT-400's connected
	 * in tunnel fashion.  Eventually this needs real topology
	 * information.  There may be some issues with dual core numbering
	 * as well.  This needs more work prior to release.
	 */
	if (!cpus_empty(current->cpus_allowed) &&
	    !cpus_full(current->cpus_allowed)) {
		int ncpus = num_online_cpus(), curcpu = -1;
		for (i = 0; i < ncpus; i++)
			if (cpu_isset(i, current->cpus_allowed)) {
				ipath_cdbg(PROC, "%s[%u] affinity set for "
					   "cpu %d\n", current->comm,
					   current->pid, i);
				curcpu = i;
			}
		if (curcpu != -1) {
			prefunit = curcpu / (ncpus / npresent);
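			/*
			 * e.g. (illustrative numbers only): 8 online cpus
			 * and 2 chips present give 4 cpus/chip, so affinity
			 * to cpu 5 selects prefunit 5 / 4 = 1 above.
			 */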
			ipath_dbg("%s[%u] %d chips, %d cpus, "
				  "%d cpus/chip, select unit %d\n",
				  current->comm, current->pid,
				  npresent, ncpus, ncpus / npresent,
				  prefunit);
		}
	}

	/*
	 * user ports start at 1, kernel port is 0
	 * For now, we do round-robin access across all chips
	 */

	if (prefunit != -1)
		devmax = prefunit + 1;
	else
		devmax = ipath_count_units(NULL, NULL, NULL);
recheck:
	for (i = 1; i < maxofallports; i++) {
		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
		     ndev++) {
			struct ipath_devdata *dd = ipath_lookup(ndev);

			if (!usable(dd))
				continue;	/* can't use this unit */
			if (i >= dd->ipath_cfgports)
				/*
				 * Maxed out on users of this unit. Try
				 * the next.
				 */
				continue;
			ret = try_alloc_port(dd, i, fp);
			if (!ret)
				goto done;
		}
	}

	if (npresent) {
		if (nup == 0) {
			ret = -ENETDOWN;
			ipath_dbg("No ports available (none initialized "
				  "and ready)\n");
		} else {
			if (prefunit > 0) {
				/* if started above 0, retry from 0 */
				ipath_cdbg(PROC,
					   "%s[%u] no ports on prefunit "
					   "%d, clear and re-check\n",
					   current->comm, current->pid,
					   prefunit);
				devmax = ipath_count_units(NULL, NULL,
							   NULL);
				prefunit = -1;
				goto recheck;
			}
			ret = -EBUSY;
			ipath_dbg("No ports available\n");
		}
	} else {
		ret = -ENXIO;
		ipath_dbg("No boards found\n");
	}

done:
	return ret;
}
static int ipath_open(struct inode *in, struct file *fp)
{
	int ret, user_minor;

	mutex_lock(&ipath_mutex);

	user_minor = iminor(in) - IPATH_USER_MINOR_BASE;
	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
		   (long)in->i_rdev, user_minor);

	if (user_minor)
		ret = find_free_port(user_minor - 1, fp);
	else
		ret = find_best_unit(fp);

	mutex_unlock(&ipath_mutex);
	return ret;
}
/**
 * unlock_expected_tids - unlock any expected TID entries port still had in use
 * @pd: port
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
		   pd->port_port);
	for (i = port_tidbase; i < maxtid; i++) {
		if (!dd->ipath_pageshadow[i])
			continue;

		ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
						  1);
		dd->ipath_pageshadow[i] = NULL;
		cnt++;
		ipath_stats.sps_pageunlocks++;
	}
	if (cnt)
		ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
			   pd->port_port, cnt);

	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
			   (unsigned long long) ipath_stats.sps_pagelocks,
			   (unsigned long long)
			   ipath_stats.sps_pageunlocks);
}
static int ipath_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	unsigned port;

	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
		   (long)in->i_rdev, fp->private_data);

	mutex_lock(&ipath_mutex);

	pd = fp->private_data;
	port = pd->port_port;
	fp->private_data = NULL;
	dd = pd->port_dd;

	if (pd->port_hdrqfull) {
		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
			   "during run\n", pd->port_comm, pd->port_pid,
			   pd->port_hdrqfull);
		pd->port_hdrqfull = 0;
	}

	if (pd->port_rcvwait_to || pd->port_piowait_to
	    || pd->port_rcvnowait || pd->port_pionowait) {
		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
			   "%u rcv %u, pio already\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
		pd->port_rcvwait_to = pd->port_piowait_to =
			pd->port_rcvnowait = pd->port_pionowait = 0;
	}
	if (pd->port_flag) {
		ipath_dbg("port %u port_flag still set to 0x%lx\n",
			  pd->port_port, pd->port_flag);
		pd->port_flag = 0;
	}

	if (dd->ipath_kregbase) {
		if (pd->port_rcvhdrtail_uaddr) {
			pd->port_rcvhdrtail_uaddr = 0;
			pd->port_rcvhdrtail_kvaddr = NULL;
			ipath_release_user_pages_on_close(
				&pd->port_rcvhdrtail_pagep, 1);
			pd->port_rcvhdrtail_pagep = NULL;
			ipath_stats.sps_pageunlocks++;
		}
		ipath_write_kreg_port(
			dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			port, 0ULL);
		ipath_write_kreg_port(
			dd, dd->ipath_kregs->kr_rcvhdraddr,
			pd->port_port, 0);

		/* clean up the pkeys for this port user */
		ipath_clean_part_key(pd, dd);

		if (port < dd->ipath_cfgports) {
			int i = dd->ipath_pbufsport * (port - 1);
			ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);

			/* atomically clear receive enable port. */
			clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
				  &dd->ipath_rcvctrl);
			ipath_write_kreg(
				dd,
				dd->ipath_kregs->kr_rcvctrl,
				dd->ipath_rcvctrl);

			if (dd->ipath_pageshadow)
				unlock_expected_tids(pd);
			ipath_stats.sps_ports--;
			ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
				   pd->port_comm, pd->port_pid,
				   dd->ipath_unit, port);
		}
	}

	pd->port_cnt = 0;
	pd->port_pid = 0;

	dd->ipath_f_clear_tids(dd, pd->port_port);

	ipath_free_pddata(dd, pd->port_port, 0);

	mutex_unlock(&ipath_mutex);

	return ret;
}
static int ipath_port_info(struct ipath_portdata *pd,
			   struct ipath_port_info __user *uinfo)
{
	struct ipath_port_info info;
	int nup;
	int ret;

	(void) ipath_count_units(NULL, &nup, NULL);
	info.num_active = nup;
	info.unit = pd->port_dd->ipath_unit;
	info.port = pd->port_port;

	if (copy_to_user(uinfo, &info, sizeof(info))) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}
static ssize_t ipath_write(struct file *fp, const char __user *data,
			   size_t count, loff_t *off)
{
	const struct ipath_cmd __user *ucmd;
	struct ipath_portdata *pd;
	const void __user *src;
	size_t consumed, copy;
	struct ipath_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct ipath_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case IPATH_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;
	case IPATH_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;
	case IPATH_CMD_PORT_INFO:
		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;
		src = &ucmd->cmd.port_info;
		break;
	case IPATH_CMD_TID_UPDATE:
	case IPATH_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;
	case IPATH_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	if ((count - consumed) < copy) {
		ret = -EINVAL;
		goto bail;
	}

	if (copy_from_user(dest, src, copy)) {
		ret = -EFAULT;
		goto bail;
	}

	consumed += copy;
	pd = fp->private_data;

	switch (cmd.type) {
	case IPATH_CMD_USER_INIT:
		ret = ipath_do_user_init(pd, &cmd.cmd.user_info);
		if (ret < 0)
			goto bail;
		ret = ipath_get_base_info(
			pd, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		break;
	case IPATH_CMD_RECV_CTRL:
		ret = ipath_manage_rcvq(pd, cmd.cmd.recv_ctrl);
		break;
	case IPATH_CMD_PORT_INFO:
		ret = ipath_port_info(pd,
				      (struct ipath_port_info __user *)
				      (unsigned long) cmd.cmd.port_info);
		break;
	case IPATH_CMD_TID_UPDATE:
		ret = ipath_tid_update(pd, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_TID_FREE:
		ret = ipath_tid_free(pd, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_SET_PART_KEY:
		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}
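
/*
 * The write() path above is the driver's command channel: a type tag
 * followed by the matching union member.  A hypothetical generic sender
 * (the helper name is ours, and treating any positive return from write()
 * as success is an assumption):
 */
#if 0	/* example only, never built */
static int example_send_cmd(int fd, const struct ipath_cmd *c)
{
	ssize_t n = write(fd, c, sizeof(*c));

	return n > 0 ? 0 : -1;
}
#endif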
static struct class *ipath_class;

static int init_cdev(int minor, char *name, struct file_operations *fops,
		     struct cdev **cdevp, struct class_device **class_devp)
{
	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
	struct cdev *cdev = NULL;
	struct class_device *class_dev = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	class_dev = class_device_create(ipath_class, NULL, dev, NULL, name);

	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "class_dev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	goto done;

err_cdev:
	cdev_del(cdev);
	cdev = NULL;

done:
	if (ret >= 0) {
		*cdevp = cdev;
		*class_devp = class_dev;
	} else {
		*cdevp = NULL;
		*class_devp = NULL;
	}

	return ret;
}

int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
		    struct cdev **cdevp, struct class_device **class_devp)
{
	return init_cdev(minor, name, fops, cdevp, class_devp);
}

static void cleanup_cdev(struct cdev **cdevp,
			 struct class_device **class_devp)
{
	struct class_device *class_dev = *class_devp;
	struct cdev *cdev = *cdevp;

	if (class_dev) {
		class_device_unregister(class_dev);
		*class_devp = NULL;
	}

	if (cdev) {
		cdev_del(cdev);
		*cdevp = NULL;
	}
}

void ipath_cdev_cleanup(struct cdev **cdevp,
			struct class_device **class_devp)
{
	cleanup_cdev(cdevp, class_devp);
}

static struct cdev *wildcard_cdev;
static struct class_device *wildcard_class_dev;

static const dev_t dev = MKDEV(IPATH_MAJOR, 0);

static int user_init(void)
{
	int ret;

	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		goto bail_chrdev;
	}

	goto done;

bail_chrdev:
	unregister_chrdev_region(dev, IPATH_NMINORS);

done:
	return ret;
}

static void user_cleanup(void)
{
	if (ipath_class) {
		class_destroy(ipath_class);
		ipath_class = NULL;
	}

	unregister_chrdev_region(dev, IPATH_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);
static atomic_t user_setup = ATOMIC_INIT(0);

int ipath_user_add(struct ipath_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = user_init();
		if (ret < 0) {
			ipath_dev_err(dd, "Unable to set up user support: "
				      "error %d\n", -ret);
			goto bail;
		}
		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
				&wildcard_class_dev);
		if (ret < 0) {
			ipath_dev_err(dd, "Could not create wildcard "
				      "minor: error %d\n", -ret);
			goto bail_user;
		}

		atomic_set(&user_setup, 1);
	}

	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);

	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
			&dd->user_cdev, &dd->user_class_dev);
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
			      dd->ipath_unit + 1, name);

	goto bail;

bail_user:
	user_cleanup();
bail:
	return ret;
}

void ipath_user_remove(struct ipath_devdata *dd)
{
	cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);

	if (atomic_dec_return(&user_count) == 0) {
		if (atomic_read(&user_setup) == 0)
			goto bail;

		cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
		user_cleanup();

		atomic_set(&user_setup, 0);
	}

bail:
	return;
}