/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ipath_common.h"
/*
 * mmap64 doesn't allow all 64 bits for 32-bit applications
 * so only use the low 43 bits.
 */
#define MMAP64_MASK	0x7FFFFFFFFFFUL
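/*
 * Illustrative note (not in the original source): the mask keeps the low
 * 43 bits of a kernel virtual address so it can be handed back to user
 * space as an mmap() offset token.  For example, a vmalloc() address of
 * 0xffffc20000041000 becomes 0x20000041000 after "& MMAP64_MASK"; the
 * same masking is applied in mmap_kvaddr() when matching the token back
 * to the kernel address, so only the masked values ever need to agree.
 */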
static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
			   loff_t *);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);

static struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap
};
static int ipath_get_base_info(struct file *fp,
			       void __user *ubase, size_t ubase_size)
{
	struct ipath_portdata *pd = port_fp(fp);
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;
	unsigned subport_cnt;
	int shared, master;
	size_t sz;

	subport_cnt = pd->port_subport_cnt;
	if (!subport_cnt) {
		shared = 0;
		master = 0;
		subport_cnt = 1;
	} else {
		shared = 1;
		master = !subport_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If port sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 3 * sizeof(u64);
	if (ubase_size < sz) {
		ipath_cdbg(PROC,
			   "Base size %zu, need %zu (version mismatch?)\n",
			   ubase_size, sz);
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}
	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an opaque value.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
	if (!shared) {
		kinfo->spi_piocnt = dd->ipath_pbufsport;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_palign * pd->port_port;
	} else if (master) {
		kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
				    (dd->ipath_pbufsport % subport_cnt);
		/* Master's PIO buffers are after all the slaves' */
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign *
			(dd->ipath_pbufsport - kinfo->spi_piocnt);
		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_palign * pd->port_port;
	} else {
		unsigned slave = subport_fp(fp) - 1;

		kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign * kinfo->spi_piocnt * slave;
		kinfo->__spi_uregbase = ((u64) pd->subport_uregbase +
			PAGE_SIZE * slave) & MMAP64_MASK;

		kinfo->spi_rcvhdr_base = ((u64) pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * slave) & MMAP64_MASK;
		kinfo->spi_rcvhdr_tailaddr =
			(u64) pd->port_rcvhdrqtailaddr_phys & MMAP64_MASK;
		kinfo->spi_rcv_egrbufs = ((u64) pd->subport_rcvegrbuf +
			dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize *
			slave) & MMAP64_MASK;
	}
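	/*
	 * Worked example (illustrative, not in the original source): with
	 * ipath_pbufsport == 66 and subport_cnt == 4, each slave gets
	 * 66 / 4 = 16 PIO buffers and the master gets 16 + (66 % 4) = 18.
	 * Slave n (n = 1..3) starts at port_piobufs +
	 * ipath_palign * 16 * (n - 1), and the master's buffers begin
	 * after all of the slaves', at port_piobufs +
	 * ipath_palign * (66 - 18).
	 */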
	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
		dd->ipath_palign;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	kinfo->spi_piosize = dd->ipath_ibmaxlen;
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_subport = subport_fp(fp);
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (master) {
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
		kinfo->spi_subport_uregbase =
			(u64) pd->subport_uregbase & MMAP64_MASK;
		kinfo->spi_subport_rcvegrbuf =
			(u64) pd->subport_rcvegrbuf & MMAP64_MASK;
		kinfo->spi_subport_rcvhdr_base =
			(u64) pd->subport_rcvhdr_base & MMAP64_MASK;
		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
			   kinfo->spi_port, kinfo->spi_runtime_flags,
			   (unsigned long long) kinfo->spi_subport_uregbase,
			   (unsigned long long) kinfo->spi_subport_rcvegrbuf,
			   (unsigned long long) kinfo->spi_subport_rcvhdr_base);
	}

	if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}
/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @fp: the ipath device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;
	unsigned subport = subport_fp(fp);

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success?  likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt) {
		tidcnt = dd->ipath_rcvtidcnt;
		tid = pd->port_tidcursor;
		tidoff = 0;
	} else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		tidoff = dd->ipath_rcvtidcnt - tidcnt;
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		tidoff = tidcnt * (subport - 1);
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
	tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			goto cleanup;
		}
		tidlist[i] = tid + tidoff;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid + tidoff, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		dd->ipath_physshadow[porttid + tid] = ipath_map_page(
			dd->pcidev, pagep[i], 0, PAGE_SIZE,
			PCI_DMA_FROMDEVICE);
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->ipath_physshadow[porttid + tid];
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}
	if (ret) {
		u32 limit;
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
						    dd->ipath_tidinvalid);
				pci_unmap_page(dd->pcidev,
					dd->ipath_physshadow[porttid + tid],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail".  If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		if (!pd->port_subport_cnt)
			pd->port_tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, ret);
	return ret;
}
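/*
 * Illustrative sketch (not in the original source) of how a user-space
 * library might drive the expected-TID path above, using only the
 * exported struct ipath_tid_info fields; variable names are hypothetical:
 *
 *	struct ipath_tid_info ti = {
 *		.tidcnt   = npages,
 *		.tidvaddr = (unsigned long) recv_buf,      // page aligned
 *		.tidlist  = (u64) (unsigned long) tids,    // u16 array out
 *		.tidmap   = (u64) (unsigned long) map,     // bitmap out
 *	};
 *	// submitted via write() with IPATH_CMD_TID_UPDATE, and later
 *	// released via IPATH_CMD_TID_FREE passing back the same tidmap.
 */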
/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @subport: the subport
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof tidmap)) {
		ret = -EFAULT;
		goto done;
	}

	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt)
		tidcnt = dd->ipath_rcvtidcnt;
	else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		porttid += dd->ipath_rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		porttid += tidcnt * (subport - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pd->port_pid, tid);
			dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
					    dd->ipath_tidinvalid);
			pci_unmap_page(dd->pcidev,
				dd->ipath_physshadow[porttid + tid],
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ipath_release_user_pages(
				&dd->ipath_pageshadow[porttid + tid], 1);
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
	struct ipath_devdata *dd = pd->port_dd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;
	int ret;

	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
		   pd->port_port, key, dd->ipath_pkeys[0],
		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
		   atomic_read(&dd->ipath_pkeyrefs[3]));

	if (!lkey) {
		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
			   pd->port_port);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it. (see bug 4331)
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i] && pidx == -1)
			pidx = i;
		if (pd->port_pkeys[i] == key) {
			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
				   "(%x) more than once\n",
				   pd->port_port, key);
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ipath_dbg("All pkeys for port %u already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		if (dd->ipath_pkeys[i] == key) {
			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				pd->port_pkeys[pidx] = key;
				ipath_cdbg(VERBOSE, "p%u set key %x "
					   "matches #%d, count now %d\n",
					   pd->port_port, key, i,
					   atomic_read(pkrefs));
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				ipath_cdbg(VERBOSE, "Lost race, count was "
					   "0, after dec, it's %d\n",
					   atomic_read(pkrefs));
				any++;
			}
		}
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ipath_dbg("port %u, all pkeys already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			u64 pkey;

			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
			pkey =
				(u64) dd->ipath_pkeys[0] |
				((u64) dd->ipath_pkeys[1] << 16) |
				((u64) dd->ipath_pkeys[2] << 32) |
				((u64) dd->ipath_pkeys[3] << 48);
			ipath_cdbg(PROC, "p%u set key %x in #%d, "
				   "portidx %d, new pkey reg %llx\n",
				   pd->port_port, key, i, pidx,
				   (unsigned long long) pkey);
			ipath_write_kreg(
				dd, dd->ipath_kregs->kr_partitionkey, pkey);

			ret = 0;
			goto bail;
		}
	}
	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
		  "can't set %x\n", pd->port_port, key);
	ret = -EBUSY;

bail:
	return ret;
}
/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @subport: the subport
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
			     int start_stop)
{
	struct ipath_devdata *dd = pd->port_dd;
	u64 tval;

	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
		   start_stop ? "en" : "dis", dd->ipath_unit,
		   pd->port_port, subport);
	/* atomically clear receive enable port. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.  This could cause a
		 * problem if software was broken, and did the enable w/o
		 * the disable, but eventually the in-memory copy will be
		 * updated and correct itself, even in the face of software
		 * bugs.
		 */
		*(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0;
		set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
			&dd->ipath_rcvctrl);
	} else
		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
			  &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* now be sure chip saw it before we return */
	tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	if (start_stop) {
		/*
		 * And try to be sure that tail reg update has happened too.
		 * This should in theory interlock with the RXE changes to
		 * the tail register.  Don't assign it to the tail register
		 * in memory copy, since we could overwrite an update by the
		 * chip if we did.
		 */
		tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	}
	/* always; new head should be equal to new tail; see above */
	return 0;
}
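/*
 * Usage note (illustrative, not in the original source): user space
 * reaches this path through the write() command interface.  On a
 * receive-queue overflow it can issue IPATH_CMD_RECV_CTRL with
 * recv_ctrl == 0 to quiesce the port, drain its receive header queue,
 * and then issue the same command with recv_ctrl == 1 to re-arm it with
 * head == tail == 0.
 */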
static void ipath_clean_part_key(struct ipath_portdata *pd,
				 struct ipath_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;

	/* for debugging only */
	oldpkey = (u64) dd->ipath_pkeys[0] |
		((u64) dd->ipath_pkeys[1] << 16) |
		((u64) dd->ipath_pkeys[2] << 32) |
		((u64) dd->ipath_pkeys[3] << 48);

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i])
			continue;
		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
			   pd->port_pkeys[i]);
		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
			/* check for match independent of the global bit */
			if ((dd->ipath_pkeys[j] & 0x7fff) !=
			    (pd->port_pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
				ipath_cdbg(VERBOSE, "p%u clear key "
					   "%x matches #%d\n",
					   pd->port_port,
					   pd->port_pkeys[i], j);
				ipath_stats.sps_pkeys[j] =
					dd->ipath_pkeys[j] = 0;
				pchanged++;
			} else
				ipath_cdbg(
					VERBOSE, "p%u key %x matches #%d, "
					"but ref still %d\n", pd->port_port,
					pd->port_pkeys[i], j,
					atomic_read(&dd->ipath_pkeyrefs[j]));
			break;
		}
		pd->port_pkeys[i] = 0;
	}
	if (pchanged) {
		u64 pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
			   "new pkey reg %llx\n", pd->port_port,
			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
}
/*
 * Initialize the port data with the receive buffer sizes
 * so this can be done while the master port is locked.
 * Otherwise, there is a race with a slave opening the port
 * and seeing these fields uninitialized.
 */
static void init_user_egr_sizes(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned egrperchunk, egrcnt, size;

	/*
	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
	 * physically contiguous memory, advance through it until used up
	 * and then allocate more.  Of course, we need memory to store those
	 * extra pointers, now.  Started out with 256KB, but under heavy
	 * memory pressure (creating large files and then copying them over
	 * NFS while doing lots of MPI jobs), we hit some allocation
	 * failures, even though we can sleep...  (2.6.10) Still get
	 * failures at 64K.  32K is the lowest we can go without wasting
	 * additional memory.
	 */
	size = 0x8000;
	egrperchunk = size / dd->ipath_rcvegrbufsize;
	egrcnt = dd->ipath_rcvegrcnt;
	pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
	pd->port_rcvegrbufs_perchunk = egrperchunk;
	pd->port_rcvegrbuf_size = size;
}
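/*
 * Worked example (illustrative, not in the original source): with a
 * 2048-byte eager buffer size and 512 eager TIDs, each 32KB (0x8000)
 * chunk holds 0x8000 / 2048 = 16 buffers, so the port needs
 * (512 + 16 - 1) / 16 = 32 chunks; the round-up means only the last
 * chunk can be partly unused.
 */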
/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	int ret;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	egrcnt = dd->ipath_rcvegrcnt;
	/* TID number offset for this port */
	egroff = pd->port_port * egrcnt;
	egrsize = dd->ipath_rcvegrbufsize;
	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

	chunk = pd->port_rcvegrbuf_chunks;
	egrperchunk = pd->port_rcvegrbufs_perchunk;
	size = pd->port_rcvegrbuf_size;
	pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
				     GFP_KERNEL);
	if (!pd->port_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail;
	}
	pd->port_rcvegrbuf_phys =
		kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]),
			GFP_KERNEL);
	if (!pd->port_rcvegrbuf_phys) {
		ret = -ENOMEM;
		goto bail_rcvegrbuf;
	}
	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
			&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
			gfp_flags);

		if (!pd->port_rcvegrbuf[e]) {
			ret = -ENOMEM;
			goto bail_rcvegrbuf_phys;
		}
	}

	pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
		unsigned i;

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->ipath_f_put_tid(dd, e + egroff +
					    (u64 __iomem *)
					    ((char __iomem *)
					     dd->ipath_kregbase +
					     dd->ipath_rcvegrbase), 0, pa);
			pa += egrsize;
		}
		cond_resched();	/* don't hog the cpu */
	}

	ret = 0;
	goto bail;

bail_rcvegrbuf_phys:
	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
		     pd->port_rcvegrbuf[e]; e++) {
		dma_free_coherent(&dd->pcidev->dev, size,
				  pd->port_rcvegrbuf[e],
				  pd->port_rcvegrbuf_phys[e]);
	}
	kfree(pd->port_rcvegrbuf_phys);
	pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(pd->port_rcvegrbuf);
	pd->port_rcvegrbuf = NULL;
bail:
	return ret;
}
/* common code for the mappings on dma_alloc_coherent mem */
static int ipath_mmap_mem(struct vm_area_struct *vma,
			  struct ipath_portdata *pd, unsigned len,
			  int write_ok, void *kvaddr, char *what)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		dev_info(&dd->pcidev->dev,
			 "FAIL on %s: len %lx > %x\n", what,
			 vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
			 "bytes r%c failed: %d\n", what, pd->port_port,
			 pfn, len, write_ok?'w':'o', ret);
	else
		ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
			   "r%c\n", what, pd->port_port, pfn, len,
			   write_ok?'w':'o');
bail:
	return ret;
}
static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their port
	 * in the chip.
	 */
	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->ipath_physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}
static int mmap_piobufs(struct vm_area_struct *vma,
			struct ipath_devdata *dd,
			struct ipath_portdata *pd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible.  This prevents access to previous
	 * process data, and catches users who might try to read the i/o
	 * space due to a bug.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
			 "reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EFAULT;
		goto bail;
	}

	phys = dd->ipath_physaddr + piobufs;

	/*
	 * Don't mark this as non-cached, or we don't get the
	 * write combining behavior we want on the PIO buffers!
	 */

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for
	 * when not initially mapped readable, as is normally the case)
	 */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}
static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = pd->port_rcvegrbuf_size;
	total_size = pd->port_rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EFAULT;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;

	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}
/*
 * ipath_file_vma_nopage - handle a VMA page fault.
 */
static struct page *ipath_file_vma_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
{
	unsigned long offset = address - vma->vm_start;
	struct page *page = NOPAGE_SIGBUS;
	void *pageptr;

	/*
	 * Convert the vmalloc address into a struct page.
	 */
	pageptr = (void *)(offset + (vma->vm_pgoff << PAGE_SHIFT));
	page = vmalloc_to_page(pageptr);
	if (!page)
		goto out;

	/* Increment the reference count. */
	get_page(page);
	if (type)
		*type = VM_FAULT_MINOR;
out:
	return page;
}

static struct vm_operations_struct ipath_file_vm_ops = {
	.nopage = ipath_file_vma_nopage,
};
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct ipath_portdata *pd, unsigned subport)
{
	unsigned long len;
	struct ipath_devdata *dd;
	void *addr;
	size_t size;
	int ret;

	/* If the port is not shared, all addresses should be physical */
	if (!pd->port_subport_cnt) {
		ret = -EINVAL;
		goto bail;
	}

	dd = pd->port_dd;
	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;

	/*
	 * Master has all the slave uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped.
	 */
	if (subport == 0) {
		unsigned num_slaves = pd->port_subport_cnt - 1;

		if (pgaddr == ((u64) pd->subport_uregbase & MMAP64_MASK)) {
			addr = pd->subport_uregbase;
			size = PAGE_SIZE * num_slaves;
		} else if (pgaddr == ((u64) pd->subport_rcvhdr_base &
				      MMAP64_MASK)) {
			addr = pd->subport_rcvhdr_base;
			size = pd->port_rcvhdrq_size * num_slaves;
		} else if (pgaddr == ((u64) pd->subport_rcvegrbuf &
				      MMAP64_MASK)) {
			addr = pd->subport_rcvegrbuf;
			size *= num_slaves;
		} else {
			ret = -EINVAL;
			goto bail;
		}
	} else if (pgaddr == (((u64) pd->subport_uregbase +
			       PAGE_SIZE * (subport - 1)) & MMAP64_MASK)) {
		addr = pd->subport_uregbase + PAGE_SIZE * (subport - 1);
		size = PAGE_SIZE;
	} else if (pgaddr == (((u64) pd->subport_rcvhdr_base +
			       pd->port_rcvhdrq_size * (subport - 1)) &
			      MMAP64_MASK)) {
		addr = pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * (subport - 1);
		size = pd->port_rcvhdrq_size;
	} else if (pgaddr == (((u64) pd->subport_rcvegrbuf +
			       size * (subport - 1)) & MMAP64_MASK)) {
		addr = pd->subport_rcvegrbuf + size * (subport - 1);
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "Can't map eager buffers as "
				 "writable (flags=%lx)\n", vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writeable
		 * with mprotect.
		 */
		vma->vm_flags &= ~VM_MAYWRITE;
	} else {
		ret = -EINVAL;
		goto bail;
	}
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
		ret = -EINVAL;
		goto bail;
	}

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &ipath_file_vm_ops;
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
	ret = 0;

bail:
	return ret;
}
/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret;

	pd = port_fp(fp);
	if (!pd) {
		ret = -EINVAL;
		goto bail;
	}
	dd = pd->port_dd;

	/*
	 * This is the ipath_do_user_init() code, mapping the shared buffers
	 * into the user process.  The address referred to by vm_pgoff is the
	 * file offset passed via mmap().  For shared ports, this is the
	 * kernel vmalloc() address of the pages to share with the master.
	 * For non-shared or master ports, this is a physical address.
	 * We only do one mmap for each space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr) {
		ret = -EINVAL;
		goto bail;
	}

	ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
		   (unsigned long long) pgaddr, vma->vm_start,
		   vma->vm_end - vma->vm_start, dd->ipath_unit,
		   pd->port_port, subport_fp(fp));

	/*
	 * Physical addresses must fit in 40 bits for our hardware.
	 * Check for kernel virtual addresses first, anything else must
	 * match a HW or memory address.
	 */
	if (pgaddr >= (1ULL<<40)) {
		ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
		goto bail;
	}

	if (!pd->port_subport_cnt) {
		/* port is not shared */
		ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
		piocnt = dd->ipath_pbufsport;
		piobufs = pd->port_piobufs;
	} else if (!subport_fp(fp)) {
		/* caller is the master */
		ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
		piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) +
			 (dd->ipath_pbufsport % pd->port_subport_cnt);
		piobufs = pd->port_piobufs +
			dd->ipath_palign * (dd->ipath_pbufsport - piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		/* caller is a slave */
		ureg = 0;
		piocnt = dd->ipath_pbufsport / pd->port_subport_cnt;
		piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
	}

	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
	else if (pgaddr == dd->ipath_pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     (void *) dd->ipath_pioavailregs_dma,
				     "pioavail registers");
	else if (subport_fp(fp))
		/* Subports don't mmap the physical receive buffers */
		ret = -EINVAL;
	else if (pgaddr == pd->port_rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, pd);
	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; readonly except on HT (so have
		 * to allow writable mapping), multiple pages, contiguous
		 * from an i/o perspective.
		 */
		ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
				     pd->port_rcvhdrq,
				     "rcvhdrq");
	else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     pd->port_rcvhdrtail_kvaddr,
				     "rcvhdrq tail");
	else
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		dev_info(&dd->pcidev->dev,
			 "Failure %d on off %llx len %lx\n",
			 -ret, (unsigned long long)pgaddr,
			 vma->vm_end - vma->vm_start);
bail:
	return ret;
}
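/*
 * Summary (illustrative, not in the original source) of the mmap offset
 * dispatch above, keyed by the cookies ipath_get_base_info() hands back:
 *
 *	__spi_uregbase          -> mmap_ureg()       (chip user registers)
 *	spi_piobufbase          -> mmap_piobufs()    (chip PIO send buffers)
 *	spi_pioavailaddr        -> ipath_mmap_mem()  (pioavail shadow, RO)
 *	spi_rcv_egrbufs         -> mmap_rcvegrbufs() (eager buffers, RO)
 *	spi_rcvhdr_base         -> ipath_mmap_mem()  (rcvhdrq, RW on HT)
 *	spi_rcvhdr_tailaddr     -> ipath_mmap_mem()  (tail shadow, RO)
 *	>= 1ULL << 40 (vmalloc) -> mmap_kvaddr()     (shared-port pages)
 */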
static unsigned int ipath_poll(struct file *fp,
			       struct poll_table_struct *pt)
{
	struct ipath_portdata *pd;
	u32 head, tail;
	int bit;
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	pd = port_fp(fp);
	if (!pd)
		goto bail;
	dd = pd->port_dd;

	bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
	set_bit(bit, &dd->ipath_rcvctrl);

	/*
	 * Before blocking, make sure that head is still == tail,
	 * reading from the chip, so we can be sure the interrupt
	 * enable has made it to the chip.  If not equal, disable
	 * interrupt again and return immediately.  This avoids races,
	 * and the overhead of the chip read doesn't matter much at
	 * this point, since we are waiting for something anyway.
	 */

	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
	tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

	if (tail == head) {
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
			(void)ipath_write_ureg(dd, ur_rcvhdrhead,
					       dd->ipath_rhdrhead_intr_off
					       | head, pd->port_port);
		poll_wait(fp, &pd->port_wait, pt);

		if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
			/* timed out, no packets received */
			clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
			pd->port_rcvwait_to++;
		} else
			pollflag = POLLIN | POLLRDNORM;
	} else {
		/* it's already happened; don't do wait_event overhead */
		pollflag = POLLIN | POLLRDNORM;
		pd->port_rcvnowait++;
	}

	clear_bit(bit, &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

bail:
	return pollflag;
}
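/*
 * Usage note (illustrative, not in the original source): a user process
 * typically calls poll(2) on the port file descriptor after finding its
 * receive header queue empty, e.g.
 *
 *	struct pollfd pfd = { .fd = port_fd, .events = POLLIN };
 *	poll(&pfd, 1, timeout_ms);
 *
 * The driver arms the receive-available interrupt only for the duration
 * of the call, so the fast path (packets already queued) never takes an
 * interrupt.
 */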
static int init_subports(struct ipath_devdata *dd,
			 struct ipath_portdata *pd,
			 const struct ipath_user_info *uinfo)
{
	int ret = 0;
	unsigned num_slaves;
	size_t size;

	/* Old user binaries don't know about subports */
	if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
		goto bail;
	/*
	 * If the user is requesting zero or one port,
	 * skip the subport allocation.
	 */
	if (uinfo->spu_subport_cnt <= 1)
		goto bail;
	if (uinfo->spu_subport_cnt > 4) {
		ret = -EINVAL;
		goto bail;
	}

	num_slaves = uinfo->spu_subport_cnt - 1;
	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_slaves);
	if (!pd->subport_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_slaves;
	pd->subport_rcvhdr_base = vmalloc(size);
	if (!pd->subport_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
					pd->port_rcvegrbuf_size *
					num_slaves);
	if (!pd->subport_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	pd->port_subport_cnt = uinfo->spu_subport_cnt;
	pd->port_subport_id = uinfo->spu_subport_id;
	pd->active_slaves = 1;
	goto bail;

bail_rhdr:
	vfree(pd->subport_rcvhdr_base);
bail_ureg:
	vfree(pd->subport_uregbase);
	pd->subport_uregbase = NULL;
bail:
	return ret;
}
static int try_alloc_port(struct ipath_devdata *dd, int port,
			  struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_portdata *pd;
	int ret;

	if (!(pd = dd->ipath_pd[port])) {
		void *ptmp;

		pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

		/*
		 * Allocate memory for use in ipath_tid_update() just once
		 * at open, not per call.  Reduces cost of expected send
		 * setup.
		 */
		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
			       dd->ipath_rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);
		if (!pd || !ptmp) {
			ipath_dev_err(dd, "Unable to allocate portdata "
				      "memory, failing open\n");
			ret = -ENOMEM;
			kfree(pd);
			kfree(ptmp);
			goto bail;
		}
		dd->ipath_pd[port] = pd;
		dd->ipath_pd[port]->port_port = port;
		dd->ipath_pd[port]->port_dd = dd;
		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
	}
	if (!pd->port_cnt) {
		pd->userversion = uinfo->spu_userversion;
		init_user_egr_sizes(pd);
		if ((ret = init_subports(dd, pd, uinfo)) != 0)
			goto bail;
		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
			   current->comm, current->pid, dd->ipath_unit,
			   port);
		pd->port_cnt = 1;
		port_fp(fp) = pd;
		pd->port_pid = current->pid;
		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
	} else
		ret = -EBUSY;

bail:
	return ret;
}
static inline int usable(struct ipath_devdata *dd)
{
	return dd &&
		(dd->ipath_flags & IPATH_PRESENT) &&
		dd->ipath_kregbase &&
		dd->ipath_lid &&
		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
				     | IPATH_LINKUNK));
}
static int find_free_port(int unit, struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_devdata *dd = ipath_lookup(unit);
	int ret, i;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (!usable(dd)) {
		ret = -ENETDOWN;
		goto bail;
	}

	for (i = 1; i < dd->ipath_cfgports; i++) {
		ret = try_alloc_port(dd, i, fp, uinfo);
		if (ret != -EBUSY)
			goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}
static int find_best_unit(struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	int ret = 0, i, prefunit = -1, devmax;
	int maxofallports, npresent, nup;
	int ndev;

	devmax = ipath_count_units(&npresent, &nup, &maxofallports);

	/*
	 * This code is present to allow a knowledgeable person to
	 * specify the layout of processes to processors before opening
	 * this driver, and then we'll assign the process to the "closest"
	 * InfiniPath chip to that processor (we assume reasonable connectivity,
	 * for now).  This code assumes that if affinity has been set
	 * before this point, that at most one cpu is set; for now this
	 * is reasonable.  I check for both cpus_empty() and cpus_full(),
	 * in case some kernel variant sets none of the bits when no
	 * affinity is set.  2.6.11 and 12 kernels have all present
	 * cpus set.  Some day we'll have to fix it up further to handle
	 * a cpu subset.  This algorithm fails for two HT chips connected
	 * in tunnel fashion.  Eventually this needs real topology
	 * information.  There may be some issues with dual core numbering
	 * as well.  This needs more work prior to release.
	 */
	if (!cpus_empty(current->cpus_allowed) &&
	    !cpus_full(current->cpus_allowed)) {
		int ncpus = num_online_cpus(), curcpu = -1;
		for (i = 0; i < ncpus; i++)
			if (cpu_isset(i, current->cpus_allowed)) {
				ipath_cdbg(PROC, "%s[%u] affinity set for "
					   "cpu %d\n", current->comm,
					   current->pid, i);
				curcpu = i;
			}
		if (curcpu != -1) {
			prefunit = curcpu / (ncpus / npresent);
			ipath_dbg("%s[%u] %d chips, %d cpus, "
				  "%d cpus/chip, select unit %d\n",
				  current->comm, current->pid,
				  npresent, ncpus, ncpus / npresent,
				  prefunit);
		}
	}

	/*
	 * user ports start at 1, kernel port is 0
	 * For now, we do round-robin access across all chips
	 */

	if (prefunit != -1)
		devmax = prefunit + 1;
recheck:
	for (i = 1; i < maxofallports; i++) {
		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
		     ndev++) {
			struct ipath_devdata *dd = ipath_lookup(ndev);

			if (!usable(dd))
				continue; /* can't use this unit */
			if (i >= dd->ipath_cfgports)
				/*
				 * Maxed out on users of this unit. Try
				 * the next.
				 */
				continue;
			ret = try_alloc_port(dd, i, fp, uinfo);
			if (!ret)
				goto done;
		}
	}

	if (npresent) {
		if (nup == 0) {
			ret = -ENETDOWN;
			ipath_dbg("No ports available (none initialized "
				  "and ready)\n");
		} else {
			if (prefunit > 0) {
				/* if started above 0, retry from 0 */
				ipath_cdbg(PROC,
					   "%s[%u] no ports on prefunit "
					   "%d, clear and re-check\n",
					   current->comm, current->pid,
					   prefunit);
				devmax = ipath_count_units(NULL, NULL,
							   NULL);
				prefunit = -1;
				goto recheck;
			}
			ret = -EBUSY;
			ipath_dbg("No ports available\n");
		}
	} else {
		ret = -ENXIO;
		ipath_dbg("No boards found\n");
	}

done:
	return ret;
}
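/*
 * Worked example (illustrative, not in the original source) of the
 * affinity heuristic above: on a host with 8 online cpus and 2 chips
 * present, a process pinned to cpu 5 gets prefunit = 5 / (8 / 2) = 1,
 * so the search starts (and initially ends) at unit 1; only if no port
 * is free there does the recheck pass widen to all units.
 */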
static int find_shared_port(struct file *fp,
			    const struct ipath_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = ipath_count_units(NULL, NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct ipath_devdata *dd = ipath_lookup(ndev);

		if (!dd)
			continue;
		for (i = 1; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];

			/* Skip ports which are not yet open */
			if (!pd || !pd->port_cnt)
				continue;
			/* Skip port if it doesn't match the requested one */
			if (pd->port_subport_id != uinfo->spu_subport_id)
				continue;
			/* Verify the sharing process matches the master */
			if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
			    pd->userversion != uinfo->spu_userversion ||
			    pd->port_cnt >= pd->port_subport_cnt) {
				ret = -EINVAL;
				goto done;
			}
			port_fp(fp) = pd;
			subport_fp(fp) = pd->port_cnt++;
			tidcursor_fp(fp) = 0;
			pd->active_slaves |= 1 << subport_fp(fp);
			ipath_cdbg(PROC,
				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
				   current->comm, current->pid,
				   subport_fp(fp),
				   pd->port_comm, pd->port_pid,
				   dd->ipath_unit, pd->port_port);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}
static int ipath_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in ipath_assign_port() */
	fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
	return fp->private_data ? 0 : -ENOMEM;
}
/* Get port early, so can set affinity prior to memory allocation */
static int ipath_assign_port(struct file *fp,
			     const struct ipath_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swminor;

	/* Check to be sure we haven't already initialized this file */
	if (port_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
		ipath_dbg("User major version %d not same as driver "
			  "major %d\n", uinfo->spu_userversion >> 16,
			  IPATH_USER_SWMAJOR);
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;
	if (swminor != IPATH_USER_SWMINOR)
		ipath_dbg("User minor version %d not same as driver "
			  "minor %d\n", swminor, IPATH_USER_SWMINOR);

	mutex_lock(&ipath_mutex);

	if (swminor == IPATH_USER_SWMINOR && uinfo->spu_subport_cnt &&
	    (ret = find_shared_port(fp, uinfo))) {
		mutex_unlock(&ipath_mutex);
		if (ret > 0)
			ret = 0;
		goto done;
	}

	i_minor = iminor(fp->f_dentry->d_inode) - IPATH_USER_MINOR_BASE;
	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
		   (long)fp->f_dentry->d_inode->i_rdev, i_minor);

	if (i_minor)
		ret = find_free_port(i_minor - 1, fp, uinfo);
	else
		ret = find_best_unit(fp, uinfo);

	mutex_unlock(&ipath_mutex);

done:
	return ret;
}
static int ipath_do_user_init(struct file *fp,
			      const struct ipath_user_info *uinfo)
{
	int ret;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u32 head32;

	pd = port_fp(fp);
	dd = pd->port_dd;

	if (uinfo->spu_rcvhdrsize) {
		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
		if (ret)
			goto done;
	}

	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */

	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
	pd->port_piobufs = dd->ipath_piobufbase +
		dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign;
	ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
		   pd->port_port, pd->port_piobufs);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If pd->port_port > chip-supported,
	 * we need to do extra stuff here to handle it by overflowing
	 * through port 0, someday
	 */
	ret = ipath_create_rcvhdrq(dd, pd);
	if (!ret)
		ret = ipath_create_user_egr(pd);
	if (ret)
		goto done;

	/*
	 * set the eager head register for this port to the current values
	 * of the tail pointers, since we don't know if they were
	 * updated on last use of the port.
	 */
	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
	dd->ipath_lastegrheads[pd->port_port] = -1;
	dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
		   pd->port_port, head32);
	pd->port_tidcursor = 0;	/* start at beginning after open */
	/*
	 * now enable the port; the tail registers will be written to memory
	 * by the chip as soon as it sees the write to
	 * dd->ipath_kregs->kr_rcvctrl.  The update only happens on
	 * transition from 0 to 1, so clear it first, then set it as part of
	 * enabling the port.  This will (very briefly) affect any other
	 * open ports, but it shouldn't be long enough to be an issue.
	 * We explicitly set the in-memory copy to 0 beforehand, so we don't
	 * have to wait to be sure the DMA update has happened.
	 */
	*(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0ULL;
	set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
		&dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

done:
	return ret;
}
/*
 * unlock_expected_tids - unlock any expected TID entries port still had
 * in use
 * @pd: port
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
		   pd->port_port);
	for (i = port_tidbase; i < maxtid; i++) {
		if (!dd->ipath_pageshadow[i])
			continue;

		pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
			       PAGE_SIZE, PCI_DMA_FROMDEVICE);
		ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
						  1);
		dd->ipath_pageshadow[i] = NULL;
		cnt++;
		ipath_stats.sps_pageunlocks++;
	}
	if (cnt)
		ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
			   pd->port_port, cnt);
	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
			   (unsigned long long) ipath_stats.sps_pagelocks,
			   (unsigned long long)
			   ipath_stats.sps_pageunlocks);
}
static int ipath_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct ipath_filedata *fd;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	unsigned port;

	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
		   (long)in->i_rdev, fp->private_data);

	mutex_lock(&ipath_mutex);

	fd = (struct ipath_filedata *) fp->private_data;
	fp->private_data = NULL;
	pd = fd->pd;
	if (!pd) {
		mutex_unlock(&ipath_mutex);
		goto bail;
	}
	if (--pd->port_cnt) {
		/*
		 * XXX If the master closes the port before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		pd->active_slaves &= ~(1 << fd->subport);
		mutex_unlock(&ipath_mutex);
		goto bail;
	}
	port = pd->port_port;
	dd = pd->port_dd;

	if (pd->port_hdrqfull) {
		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
			   "during run\n", pd->port_comm, pd->port_pid,
			   pd->port_hdrqfull);
		pd->port_hdrqfull = 0;
	}

	if (pd->port_rcvwait_to || pd->port_piowait_to
	    || pd->port_rcvnowait || pd->port_pionowait) {
		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
			   "%u rcv %u, pio already\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
		pd->port_rcvwait_to = pd->port_piowait_to =
			pd->port_rcvnowait = pd->port_pionowait = 0;
	}
	if (pd->port_flag) {
		ipath_dbg("port %u port_flag still set to 0x%lx\n",
			  pd->port_port, pd->port_flag);
		pd->port_flag = 0;
	}

	if (dd->ipath_kregbase) {
		int i;
		/* atomically clear receive enable port. */
		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
			  &dd->ipath_rcvctrl);
		ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
				  dd->ipath_rcvctrl);
		/* and read back from chip to be sure that nothing
		 * else is in flight when we do the rest */
		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

		/* clean up the pkeys for this port user */
		ipath_clean_part_key(pd, dd);
		/*
		 * be paranoid, and never write 0's to these, just use an
		 * unused part of the port 0 tail page.  Of course,
		 * rcvhdraddr points to a large chunk of memory, so this
		 * could still trash things, but at least it won't trash
		 * page 0, and by disabling the port, it should stop "soon",
		 * even if a packet or two is already in flight after we
		 * disabled the port.
		 */
		ipath_write_kreg_port(dd,
			dd->ipath_kregs->kr_rcvhdrtailaddr, port,
			dd->ipath_dummy_hdrq_phys);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			pd->port_port, dd->ipath_dummy_hdrq_phys);

		i = dd->ipath_pbufsport * (port - 1);
		ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);

		dd->ipath_f_clear_tids(dd, pd->port_port);

		if (dd->ipath_pageshadow)
			unlock_expected_tids(pd);
		ipath_stats.sps_ports--;
		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
			   pd->port_comm, pd->port_pid,
			   dd->ipath_unit, port);
	}

	pd->port_pid = 0;
	dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
	mutex_unlock(&ipath_mutex);
	ipath_free_pddata(dd, pd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}
static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
			   struct ipath_port_info __user *uinfo)
{
	struct ipath_port_info info;
	int nup;
	int ret;
	size_t sz;

	(void) ipath_count_units(NULL, &nup, NULL);
	info.num_active = nup;
	info.unit = pd->port_dd->ipath_unit;
	info.port = pd->port_port;
	info.subport = subport;
	/* Don't return new fields if old library opened the port. */
	if ((pd->userversion & 0xffff) == IPATH_USER_SWMINOR) {
		/* Number of user ports available for this device. */
		info.num_ports = pd->port_dd->ipath_cfgports - 1;
		info.num_subports = pd->port_subport_cnt;
		sz = sizeof(info);
	} else
		sz = sizeof(info) - 2 * sizeof(u16);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}
static int ipath_get_slave_info(struct ipath_portdata *pd,
				void __user *slave_mask_addr)
{
	int ret = 0;

	if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
		ret = -EFAULT;
	return ret;
}
static ssize_t ipath_write(struct file *fp, const char __user *data,
			   size_t count, loff_t *off)
{
	const struct ipath_cmd __user *ucmd;
	struct ipath_portdata *pd;
	const void __user *src;
	size_t consumed, copy;
	struct ipath_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct ipath_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
	case __IPATH_CMD_USER_INIT:
	case IPATH_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;
	case IPATH_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;
	case IPATH_CMD_PORT_INFO:
		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;
		src = &ucmd->cmd.port_info;
		break;
	case IPATH_CMD_TID_UPDATE:
	case IPATH_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;
	case IPATH_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;
	case IPATH_CMD_SLAVE_INFO:
		copy = sizeof(cmd.cmd.slave_mask_addr);
		dest = &cmd.cmd.slave_mask_addr;
		src = &ucmd->cmd.slave_mask_addr;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	if ((count - consumed) < copy) {
		ret = -EINVAL;
		goto bail;
	}

	if (copy_from_user(dest, src, copy)) {
		ret = -EFAULT;
		goto bail;
	}

	consumed += copy;
	pd = port_fp(fp);
	if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
	    cmd.type != IPATH_CMD_ASSIGN_PORT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;
	case __IPATH_CMD_USER_INIT:
		/* backwards compatibility, get port first */
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		/* and fall through to current version. */
	case IPATH_CMD_USER_INIT:
		ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = ipath_get_base_info(
			fp, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		break;
	case IPATH_CMD_RECV_CTRL:
		ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
		break;
	case IPATH_CMD_PORT_INFO:
		ret = ipath_port_info(pd, subport_fp(fp),
				      (struct ipath_port_info __user *)
				      (unsigned long) cmd.cmd.port_info);
		break;
	case IPATH_CMD_TID_UPDATE:
		ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_TID_FREE:
		ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_SET_PART_KEY:
		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		break;
	case IPATH_CMD_SLAVE_INFO:
		ret = ipath_get_slave_info(pd,
					   (void __user *) (unsigned long)
					   cmd.cmd.slave_mask_addr);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}
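/*
 * Usage sketch (illustrative, not in the original source): the control
 * plane of this driver is the write() system call on the device file,
 * with cmd.type selecting the operation.  A user library might open and
 * initialize a port roughly like this:
 *
 *	struct ipath_cmd c = { .type = IPATH_CMD_ASSIGN_PORT };
 *	c.cmd.user_info.spu_userversion = IPATH_USER_SWVERSION;
 *	write(fd, &c, sizeof(c));	// pick a unit/port
 *	c.type = IPATH_CMD_USER_INIT;
 *	c.cmd.user_info.spu_base_info = (u64)(unsigned long)&base_info;
 *	c.cmd.user_info.spu_base_info_size = sizeof(base_info);
 *	write(fd, &c, sizeof(c));	// init port, fetch base info
 *
 * On success write() returns the number of bytes it consumed, so the
 * caller can distinguish how much of the command was parsed.
 */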
static struct class *ipath_class;

static int init_cdev(int minor, char *name, struct file_operations *fops,
		     struct cdev **cdevp, struct class_device **class_devp)
{
	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
	struct cdev *cdev = NULL;
	struct class_device *class_dev = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	class_dev = class_device_create(ipath_class, NULL, dev, NULL, name);

	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "class_dev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	goto done;

err_cdev:
	cdev_del(cdev);
	cdev = NULL;

done:
	if (ret >= 0) {
		*cdevp = cdev;
		*class_devp = class_dev;
	}

	return ret;
}

int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
		    struct cdev **cdevp, struct class_device **class_devp)
{
	return init_cdev(minor, name, fops, cdevp, class_devp);
}
static void cleanup_cdev(struct cdev **cdevp,
			 struct class_device **class_devp)
{
	struct class_device *class_dev = *class_devp;
	struct cdev *cdev = *cdevp;

	if (class_dev) {
		class_device_unregister(class_dev);
		*class_devp = NULL;
	}

	if (cdev) {
		cdev_del(cdev);
		*cdevp = NULL;
	}
}

void ipath_cdev_cleanup(struct cdev **cdevp,
			struct class_device **class_devp)
{
	cleanup_cdev(cdevp, class_devp);
}
static struct cdev *wildcard_cdev;
static struct class_device *wildcard_class_dev;

static const dev_t dev = MKDEV(IPATH_MAJOR, 0);

static int user_init(void)
{
	int ret;

	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		goto bail_chrdev;
	}

	goto done;

bail_chrdev:
	unregister_chrdev_region(dev, IPATH_NMINORS);

done:
	return ret;
}
static void user_cleanup(void)
{
	if (ipath_class) {
		class_destroy(ipath_class);
		ipath_class = NULL;
	}

	unregister_chrdev_region(dev, IPATH_NMINORS);
}
static atomic_t user_count = ATOMIC_INIT(0);
static atomic_t user_setup = ATOMIC_INIT(0);

int ipath_user_add(struct ipath_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = user_init();
		if (ret < 0) {
			ipath_dev_err(dd, "Unable to set up user support: "
				      "error %d\n", -ret);
			goto bail;
		}
		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
				&wildcard_class_dev);
		if (ret < 0) {
			ipath_dev_err(dd, "Could not create wildcard "
				      "minor: error %d\n", -ret);
			goto bail_user;
		}

		atomic_set(&user_setup, 1);
	}

	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);

	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
			&dd->user_cdev, &dd->user_class_dev);
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
			      dd->ipath_unit + 1, name);

	goto bail;

bail_user:
	user_cleanup();
bail:
	return ret;
}

void ipath_user_remove(struct ipath_devdata *dd)
{
	cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);

	if (atomic_dec_return(&user_count) == 0) {
		if (atomic_read(&user_setup) == 0)
			goto bail;

		cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
		user_cleanup();

		atomic_set(&user_setup, 0);
	}

bail:
	return;
}