@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -41,12 +41,6 @@
 #include "ipath_kernel.h"
 #include "ipath_common.h"
 
-/*
- * mmap64 doesn't allow all 64 bits for 32-bit applications
- * so only use the low 43 bits.
- */
-#define MMAP64_MASK 0x7FFFFFFFFFFUL
-
 static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
 static ssize_t ipath_write(struct file *, const char __user *, size_t,
@@ -63,6 +57,24 @@ static const struct file_operations ipath_file_ops = {
 	.mmap = ipath_mmap
 };
 
+/*
+ * Convert kernel virtual addresses to physical addresses so they don't
+ * potentially conflict with the chip addresses used as mmap offsets.
+ * It doesn't really matter what mmap offset we use as long as we can
+ * interpret it correctly.
+ */
+static u64 cvt_kvaddr(void *p)
+{
+	struct page *page;
+	u64 paddr = 0;
+
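+	/*
+	 * vmalloc_to_page() returns NULL if nothing is mapped at p,
+	 * in which case paddr stays 0.
+	 */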
+	page = vmalloc_to_page(p);
+	if (page)
+		paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+	return paddr;
+}
+
 static int ipath_get_base_info(struct file *fp,
 			       void __user *ubase, size_t ubase_size)
 {
@@ -87,7 +99,7 @@ static int ipath_get_base_info(struct file *fp,
 	sz = sizeof(*kinfo);
 	/* If port sharing is not requested, allow the old size structure */
 	if (!shared)
-		sz -= 3 * sizeof(u64);
+		sz -= 7 * sizeof(u64);
 	if (ubase_size < sz) {
 		ipath_cdbg(PROC,
 			   "Base size %zu, need %zu (version mismatch?)\n",
@@ -165,24 +177,41 @@ static int ipath_get_base_info(struct file *fp,
 		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
 			dd->ipath_palign *
 			(dd->ipath_pbufsport - kinfo->spi_piocnt);
-		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
-			dd->ipath_palign * pd->port_port;
 	} else {
 		unsigned slave = subport_fp(fp) - 1;
 
 		kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
 		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
 			dd->ipath_palign * kinfo->spi_piocnt * slave;
-		kinfo->__spi_uregbase = ((u64) pd->subport_uregbase +
-			PAGE_SIZE * slave) & MMAP64_MASK;
+	}
+	if (shared) {
+		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
+			dd->ipath_palign * pd->port_port;
+		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
+		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
+		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
 
-		kinfo->spi_rcvhdr_base = ((u64) pd->subport_rcvhdr_base +
-			pd->port_rcvhdrq_size * slave) & MMAP64_MASK;
-		kinfo->spi_rcvhdr_tailaddr =
-			(u64) pd->port_rcvhdrqtailaddr_phys & MMAP64_MASK;
-		kinfo->spi_rcv_egrbufs = ((u64) pd->subport_rcvegrbuf +
-			dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize * slave) &
-			MMAP64_MASK;
+		kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
+			PAGE_SIZE * subport_fp(fp));
+
+		kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
+			pd->port_rcvhdrq_size * subport_fp(fp));
+		kinfo->spi_rcvhdr_tailaddr = 0;
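+		/*
+		 * No DMA'd tail address for a subport queue; the shared
+		 * spi_port_rcvhdr_tailaddr above presumably serves instead.
+		 */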
+		kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
+			pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
+			subport_fp(fp));
+
+		kinfo->spi_subport_uregbase =
+			cvt_kvaddr(pd->subport_uregbase);
+		kinfo->spi_subport_rcvegrbuf =
+			cvt_kvaddr(pd->subport_rcvegrbuf);
+		kinfo->spi_subport_rcvhdr_base =
+			cvt_kvaddr(pd->subport_rcvhdr_base);
+		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
+			kinfo->spi_port, kinfo->spi_runtime_flags,
+			(unsigned long long) kinfo->spi_subport_uregbase,
+			(unsigned long long) kinfo->spi_subport_rcvegrbuf,
+			(unsigned long long) kinfo->spi_subport_rcvhdr_base);
 	}
 
 	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
@@ -199,20 +228,10 @@ static int ipath_get_base_info(struct file *fp,
 
 	if (master) {
 		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
-		kinfo->spi_subport_uregbase =
-			(u64) pd->subport_uregbase & MMAP64_MASK;
-		kinfo->spi_subport_rcvegrbuf =
-			(u64) pd->subport_rcvegrbuf & MMAP64_MASK;
-		kinfo->spi_subport_rcvhdr_base =
-			(u64) pd->subport_rcvhdr_base & MMAP64_MASK;
-		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
-			kinfo->spi_port, kinfo->spi_runtime_flags,
-			(unsigned long long) kinfo->spi_subport_uregbase,
-			(unsigned long long) kinfo->spi_subport_rcvegrbuf,
-			(unsigned long long) kinfo->spi_subport_rcvhdr_base);
 	}
 
-	if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
+	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
+	if (copy_to_user(ubase, kinfo, sz))
 		ret = -EFAULT;
 
 bail:
@@ -1132,67 +1151,55 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 	struct ipath_devdata *dd;
 	void *addr;
 	size_t size;
-	int ret;
+	int ret = 0;
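+	/*
+	 * ret stays 0 when pgaddr is not a shared-port kernel address,
+	 * so the caller falls through to the physical mappings; 1 means
+	 * the mmap was completed here, and negative values are errors.
+	 */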
 
 	/* If the port is not shared, all addresses should be physical */
-	if (!pd->port_subport_cnt) {
-		ret = -EINVAL;
+	if (!pd->port_subport_cnt)
 		goto bail;
-	}
 
 	dd = pd->port_dd;
 	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
 
 	/*
-	 * Master has all the slave uregbase, rcvhdrq, and
-	 * rcvegrbufs mmapped.
+	 * Each process has all the subport uregbase, rcvhdrq, and
+	 * rcvegrbufs mmapped - as an array for all the processes,
+	 * and also separately for this process.
 	 */
-	if (subport == 0) {
-		unsigned num_slaves = pd->port_subport_cnt - 1;
-
-		if (pgaddr == ((u64) pd->subport_uregbase & MMAP64_MASK)) {
-			addr = pd->subport_uregbase;
-			size = PAGE_SIZE * num_slaves;
-		} else if (pgaddr == ((u64) pd->subport_rcvhdr_base &
-			   MMAP64_MASK)) {
-			addr = pd->subport_rcvhdr_base;
-			size = pd->port_rcvhdrq_size * num_slaves;
-		} else if (pgaddr == ((u64) pd->subport_rcvegrbuf &
-			   MMAP64_MASK)) {
-			addr = pd->subport_rcvegrbuf;
-			size *= num_slaves;
-		} else {
-			ret = -EINVAL;
-			goto bail;
-		}
-	} else if (pgaddr == (((u64) pd->subport_uregbase +
-		   PAGE_SIZE * (subport - 1)) & MMAP64_MASK)) {
-		addr = pd->subport_uregbase + PAGE_SIZE * (subport - 1);
-		size = PAGE_SIZE;
-	} else if (pgaddr == (((u64) pd->subport_rcvhdr_base +
-		   pd->port_rcvhdrq_size * (subport - 1)) &
-		   MMAP64_MASK)) {
-		addr = pd->subport_rcvhdr_base +
-			pd->port_rcvhdrq_size * (subport - 1);
-		size = pd->port_rcvhdrq_size;
-	} else if (pgaddr == (((u64) pd->subport_rcvegrbuf +
-		   size * (subport - 1)) & MMAP64_MASK)) {
-		addr = pd->subport_rcvegrbuf + size * (subport - 1);
-		/* rcvegrbufs are read-only on the slave */
-		if (vma->vm_flags & VM_WRITE) {
-			dev_info(&dd->pcidev->dev,
-				 "Can't map eager buffers as "
-				 "writable (flags=%lx)\n", vma->vm_flags);
-			ret = -EPERM;
-			goto bail;
-		}
-		/*
-		 * Don't allow permission to later change to writeable
-		 * with mprotect.
-		 */
-		vma->vm_flags &= ~VM_MAYWRITE;
+	if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
+		addr = pd->subport_uregbase;
+		size = PAGE_SIZE * pd->port_subport_cnt;
+	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
+		addr = pd->subport_rcvhdr_base;
+		size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
+	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
+		addr = pd->subport_rcvegrbuf;
+		size *= pd->port_subport_cnt;
+	} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
+					PAGE_SIZE * subport)) {
+		addr = pd->subport_uregbase + PAGE_SIZE * subport;
+		size = PAGE_SIZE;
+	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
+					pd->port_rcvhdrq_size * subport)) {
+		addr = pd->subport_rcvhdr_base +
+			pd->port_rcvhdrq_size * subport;
+		size = pd->port_rcvhdrq_size;
+	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
+					size * subport)) {
+		addr = pd->subport_rcvegrbuf + size * subport;
+		/* rcvegrbufs are read-only on the slave */
+		if (vma->vm_flags & VM_WRITE) {
+			dev_info(&dd->pcidev->dev,
+				 "Can't map eager buffers as "
+				 "writable (flags=%lx)\n", vma->vm_flags);
+			ret = -EPERM;
+			goto bail;
+		}
+		/*
+		 * Don't allow permission to later change to writeable
+		 * with mprotect.
+		 */
+		vma->vm_flags &= ~VM_MAYWRITE;
 	} else {
-		ret = -EINVAL;
 		goto bail;
 	}
 	len = vma->vm_end - vma->vm_start;
@@ -1205,7 +1212,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 	vma->vm_ops = &ipath_file_vm_ops;
 	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
-	ret = 0;
+	ret = 1;
 
 bail:
 	return ret;
@@ -1265,19 +1272,20 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
 	 * Check for kernel virtual addresses first, anything else must
 	 * match a HW or memory address.
 	 */
-	if (pgaddr >= (1ULL<<40)) {
-		ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
+	ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
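+	/* a positive return means mmap_kvaddr() completed the mapping */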
+	if (ret) {
+		if (ret > 0)
+			ret = 0;
 		goto bail;
 	}
 
+	ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
 	if (!pd->port_subport_cnt) {
 		/* port is not shared */
-		ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
 		piocnt = dd->ipath_pbufsport;
 		piobufs = pd->port_piobufs;
 	} else if (!subport_fp(fp)) {
 		/* caller is the master */
-		ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
 		piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) +
 			 (dd->ipath_pbufsport % pd->port_subport_cnt);
 		piobufs = pd->port_piobufs +
@@ -1286,7 +1294,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
 		unsigned slave = subport_fp(fp) - 1;
 
 		/* caller is a slave */
-		ureg = 0;
 		piocnt = dd->ipath_pbufsport / pd->port_subport_cnt;
 		piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
 	}
@@ -1300,9 +1307,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
 		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
 				     (void *) dd->ipath_pioavailregs_dma,
 				     "pioavail registers");
-	else if (subport_fp(fp))
-		/* Subports don't mmap the physical receive buffers */
-		ret = -EINVAL;
 	else if (pgaddr == pd->port_rcvegr_phys)
 		ret = mmap_rcvegrbufs(vma, pd);
 	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
@@ -1400,32 +1404,41 @@ static int init_subports(struct ipath_devdata *dd,
 			 const struct ipath_user_info *uinfo)
 {
 	int ret = 0;
-	unsigned num_slaves;
+	unsigned num_subports;
 	size_t size;
 
-	/* Old user binaries don't know about subports */
-	if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
-		goto bail;
 	/*
 	 * If the user is requesting zero or one port,
 	 * skip the subport allocation.
 	 */
 	if (uinfo->spu_subport_cnt <= 1)
 		goto bail;
-	if (uinfo->spu_subport_cnt > 4) {
+
+	/* Old user binaries don't know about new subport implementation */
+	if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) {
+		dev_info(&dd->pcidev->dev,
+			 "Mismatched user minor version (%d) and driver "
+			 "minor version (%d) while port sharing. Ensure "
+			 "that driver and library are from the same "
+			 "release.\n",
+			 (int) (uinfo->spu_userversion & 0xffff),
+			 IPATH_USER_SWMINOR);
+		goto bail;
+	}
+	if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
 		ret = -EINVAL;
 		goto bail;
 	}
 
-	num_slaves = uinfo->spu_subport_cnt - 1;
-	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_slaves);
+	num_subports = uinfo->spu_subport_cnt;
+	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
 	if (!pd->subport_uregbase) {
 		ret = -ENOMEM;
 		goto bail;
 	}
 	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
 	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-		     sizeof(u32), PAGE_SIZE) * num_slaves;
+		     sizeof(u32), PAGE_SIZE) * num_subports;
 	pd->subport_rcvhdr_base = vmalloc(size);
 	if (!pd->subport_rcvhdr_base) {
 		ret = -ENOMEM;
@@ -1434,7 +1447,7 @@ static int init_subports(struct ipath_devdata *dd,
 
 	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
 					pd->port_rcvegrbuf_size *
-					num_slaves);
+					num_subports);
 	if (!pd->subport_rcvegrbuf) {
 		ret = -ENOMEM;
 		goto bail_rhdr;
@@ -1443,6 +1456,12 @@ static int init_subports(struct ipath_devdata *dd,
 	pd->port_subport_cnt = uinfo->spu_subport_cnt;
 	pd->port_subport_id = uinfo->spu_subport_id;
 	pd->active_slaves = 1;
+	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
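+	/* zero the shared pages before any slave can mmap them */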
+	memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
+	memset(pd->subport_rcvhdr_base, 0, size);
+	memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
+				pd->port_rcvegrbuf_size *
+				num_subports);
 	goto bail;
 
 bail_rhdr:
@@ -1573,18 +1592,19 @@ static int find_best_unit(struct file *fp,
 	 */
 	if (!cpus_empty(current->cpus_allowed) &&
 	    !cpus_full(current->cpus_allowed)) {
-		int ncpus = num_online_cpus(), curcpu = -1;
+		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
 		for (i = 0; i < ncpus; i++)
 			if (cpu_isset(i, current->cpus_allowed)) {
 				ipath_cdbg(PROC, "%s[%u] affinity set for "
-					   "cpu %d\n", current->comm,
-					   current->pid, i);
+					   "cpu %d/%d\n", current->comm,
+					   current->pid, i, ncpus);
 				curcpu = i;
+				nset++;
 			}
-		if (curcpu != -1) {
+		if (curcpu != -1 && nset != ncpus) {
 			if (npresent) {
 				prefunit = curcpu / (ncpus / npresent);
-				ipath_dbg("%s[%u] %d chips, %d cpus, "
+				ipath_cdbg(PROC, "%s[%u] %d chips, %d cpus, "
 					  "%d cpus/chip, select unit %d\n",
 					  current->comm, current->pid,
 					  npresent, ncpus, ncpus / npresent,
@@ -1764,11 +1784,17 @@ static int ipath_do_user_init(struct file *fp,
 			      const struct ipath_user_info *uinfo)
 {
 	int ret;
-	struct ipath_portdata *pd;
+	struct ipath_portdata *pd = port_fp(fp);
 	struct ipath_devdata *dd;
 	u32 head32;
 
-	pd = port_fp(fp);
+	/* Subports don't need to initialize anything since master did it. */
+	if (subport_fp(fp)) {
+		ret = wait_event_interruptible(pd->port_wait,
+			!test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
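+		/*
+		 * The master clears IPATH_PORT_MASTER_UNINIT and does the
+		 * wake_up() once its initialization is complete.
+		 */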
+		goto done;
+	}
+
 	dd = pd->port_dd;
 
 	if (uinfo->spu_rcvhdrsize) {
@@ -1826,6 +1852,11 @@ static int ipath_do_user_init(struct file *fp,
 			dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
+	/* Notify any waiting slaves */
+	if (pd->port_subport_cnt) {
+		clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
+		wake_up(&pd->port_wait);
+	}
 done:
 	return ret;
 }
@@ -2017,6 +2048,17 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
 	return ret;
 }
 
+static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
+{
+	u64 reg = dd->ipath_sendctrl;
+
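+	/*
+	 * Write sendctrl with PIOBUFAVAILUPD cleared, then rewrite the
+	 * saved value; the transition appears to be what forces the chip
+	 * to refresh the PIO buffer-available shadow.
+	 */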
+	clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+
+	return 0;
+}
+
 static ssize_t ipath_write(struct file *fp, const char __user *data,
 			   size_t count, loff_t *off)
 {
@@ -2071,27 +2113,35 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
 		dest = &cmd.cmd.part_key;
 		src = &ucmd->cmd.part_key;
 		break;
-	case IPATH_CMD_SLAVE_INFO:
+	case __IPATH_CMD_SLAVE_INFO:
 		copy = sizeof(cmd.cmd.slave_mask_addr);
 		dest = &cmd.cmd.slave_mask_addr;
 		src = &ucmd->cmd.slave_mask_addr;
 		break;
+	case IPATH_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
+		copy = 0;
+		src = NULL;
+		dest = NULL;
+		break;
 	default:
 		ret = -EINVAL;
 		goto bail;
 	}
 
-	if ((count - consumed) < copy) {
-		ret = -EINVAL;
-		goto bail;
-	}
+	if (copy) {
+		if ((count - consumed) < copy) {
+			ret = -EINVAL;
+			goto bail;
+		}
 
-	if (copy_from_user(dest, src, copy)) {
-		ret = -EFAULT;
-		goto bail;
+		if (copy_from_user(dest, src, copy)) {
+			ret = -EFAULT;
+			goto bail;
+		}
+
+		consumed += copy;
 	}
 
-	consumed += copy;
 	pd = port_fp(fp);
 	if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
 	    cmd.type != IPATH_CMD_ASSIGN_PORT) {
@@ -2137,11 +2187,14 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
 	case IPATH_CMD_SET_PART_KEY:
 		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
 		break;
-	case IPATH_CMD_SLAVE_INFO:
+	case __IPATH_CMD_SLAVE_INFO:
 		ret = ipath_get_slave_info(pd,
 					   (void __user *) (unsigned long)
 					   cmd.cmd.slave_mask_addr);
 		break;
+	case IPATH_CMD_PIOAVAILUPD:
+		ret = ipath_force_pio_avail_update(pd->port_dd);
+		break;
 	}
 
 	if (ret >= 0)