@@ -148,15 +148,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
 }
 #endif
 
-enum {
-	MEMWIN0_APERTURE = 65536,
-	MEMWIN0_BASE     = 0x30000,
-	MEMWIN1_APERTURE = 32768,
-	MEMWIN1_BASE     = 0x28000,
-	MEMWIN2_APERTURE = 2048,
-	MEMWIN2_BASE     = 0x1b800,
-};
-
 enum {
 	MAX_TXQ_ENTRIES      = 16384,
 	MAX_CTRL_TXQ_ENTRIES = 1024,
@@ -371,6 +362,15 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
 			     uhash | mhash, sleep);
 }
 
+int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
+module_param(dbfifo_int_thresh, int, 0644);
+MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
+
+int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */
+module_param(dbfifo_drain_delay, int, 0644);
+MODULE_PARM_DESC(dbfifo_drain_delay,
+		 "usecs to sleep while draining the dbfifo");
+
 /*
  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
  * If @mtu is -1 it is left unchanged.
@@ -389,6 +389,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
 	return ret;
 }
 
+static struct workqueue_struct *workq;
+
 /**
  *	link_start - enable a port
  *	@dev: the port to enable
@@ -2196,7 +2198,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
 	if (!adap->tid_release_task_busy) {
 		adap->tid_release_task_busy = true;
-		schedule_work(&adap->tid_release_task);
+		queue_work(workq, &adap->tid_release_task);
 	}
 	spin_unlock_bh(&adap->tid_release_lock);
 }
@@ -2423,6 +2425,59 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
 }
 EXPORT_SYMBOL(cxgb4_iscsi_init);
 
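+/*
+ * Flush the SGE egress-queue context cache via a firmware address-space
+ * write.  The 0xe1000000 cookie appears to address A_SGE_CTXT_CMD through
+ * the FW, and 0x20000000 to select its flush opcode; the exact encoding
+ * is firmware-defined.
+ */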
+int cxgb4_flush_eq_cache(struct net_device *dev)
+{
+	struct adapter *adap = netdev2adap(dev);
+	int ret;
+
+	ret = t4_fwaddrspace_write(adap, adap->mbox,
+				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_flush_eq_cache);
+
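+/*
+ * Read an egress queue's current hardware cidx/pidx.  Each EQ context is
+ * 24 bytes from the DBQ context base onwards; the indices sit in the
+ * second 8-byte word, pidx at bit 9 and cidx at bit 25.
+ */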
+static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
+{
+	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
+	__be64 indices;
+	int ret;
+
+	ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
+	if (!ret) {
+		indices = be64_to_cpu(indices);
+		*cidx = (indices >> 25) & 0xffff;
+		*pidx = (indices >> 9) & 0xffff;
+	}
+	return ret;
+}
+
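+/*
+ * Re-sync a Tx queue's software producer index with the hardware copy after
+ * doorbells have been dropped: compute how far the hardware is behind and
+ * re-announce that many descriptors with one doorbell write.
+ */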
+int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
+			u16 size)
+{
+	struct adapter *adap = netdev2adap(dev);
+	u16 hw_pidx, hw_cidx;
+	int ret;
+
+	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
+	if (ret)
+		goto out;
+
+	if (pidx != hw_pidx) {
+		u16 delta;
+
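+		/* the delta must account for the ring wrapping around */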
+		if (pidx >= hw_pidx)
+			delta = pidx - hw_pidx;
+		else
+			delta = size - hw_pidx + pidx;
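+		/* publish the ring state before ringing the doorbell */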
+		wmb();
+		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+			     V_QID(qid) | V_PIDX(delta));
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
+
 static struct pci_driver cxgb4_driver;
 
 static void check_neigh_update(struct neighbour *neigh)
@@ -2456,6 +2511,95 @@ static struct notifier_block cxgb4_netevent_nb = {
 	.notifier_call = netevent_cb
 };
 
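+/*
+ * Nap in @usecs chunks until both the low- and high-priority doorbell
+ * FIFOs are empty.
+ */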
+static void drain_db_fifo(struct adapter *adap, int usecs)
+{
+	u32 v;
+
+	do {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(usecs));
+		v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+		if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
+			break;
+	} while (1);
+}
+
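+/*
+ * db_disabled gates the Tx doorbell path: with it set, descriptor writes
+ * still proceed but the queue's doorbell is not rung until recovery has
+ * re-synced the producer index.
+ */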
+static void disable_txq_db(struct sge_txq *q)
+{
+	spin_lock_irq(&q->db_lock);
+	q->db_disabled = 1;
+	spin_unlock_irq(&q->db_lock);
+}
+
+static void enable_txq_db(struct sge_txq *q)
+{
+	spin_lock_irq(&q->db_lock);
+	q->db_disabled = 0;
+	spin_unlock_irq(&q->db_lock);
+}
+
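+/* apply the doorbell gate to every Tx queue: Ethernet, offload and control */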
+static void disable_dbs(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		disable_txq_db(&adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		disable_txq_db(&adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		disable_txq_db(&adap->sge.ctrlq[i].q);
+}
+
+static void enable_dbs(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		enable_txq_db(&adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		enable_txq_db(&adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		enable_txq_db(&adap->sge.ctrlq[i].q);
+}
+
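+/*
+ * In-kernel counterpart of cxgb4_sync_txq_pidx(): re-sync one queue's pidx
+ * under its db_lock and drop the doorbell gate again on the way out.
+ */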
+static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
+{
+	u16 hw_pidx, hw_cidx;
+	int ret;
+
+	spin_lock_bh(&q->db_lock);
+	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
+	if (ret)
+		goto out;
+	if (q->db_pidx != hw_pidx) {
+		u16 delta;
+
+		if (q->db_pidx >= hw_pidx)
+			delta = q->db_pidx - hw_pidx;
+		else
+			delta = q->size - hw_pidx + q->db_pidx;
+		wmb();
+		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+			     V_QID(q->cntxt_id) | V_PIDX(delta));
+	}
+out:
+	q->db_disabled = 0;
+	spin_unlock_bh(&q->db_lock);
+	if (ret)
+		CH_WARN(adap, "DB drop recovery failed.\n");
+}
+
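+/* after a doorbell drop, bring every Tx queue back in sync with hardware */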
+static void recover_all_queues(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
+}
+
 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
 {
 	mutex_lock(&uld_mutex);
@@ -2468,55 +2612,41 @@ static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
 static void process_db_full(struct work_struct *work)
 {
 	struct adapter *adap;
-	static int delay = 1000;
-	u32 v;
 
 	adap = container_of(work, struct adapter, db_full_task);
 
-
-	/* stop LLD queues */
-
 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
-	do {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(usecs_to_jiffies(delay));
-		v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
-		if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
-			break;
-	} while (1);
+	drain_db_fifo(adap, dbfifo_drain_delay);
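+	/* the FIFO has drained, re-arm the DBFIFO threshold interrupts */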
+	t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
+			 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT,
+			 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT);
 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-
-
-	/*
-	 * The more we get db full interrupts, the more we'll delay
-	 * in re-enabling db rings on queues, capped off at 200ms.
-	 */
-	delay = min(delay << 1, 200000);
-
-	/* resume LLD queues */
 }
 
 static void process_db_drop(struct work_struct *work)
 {
 	struct adapter *adap;
-	adap = container_of(work, struct adapter, db_drop_task);
 
+	adap = container_of(work, struct adapter, db_drop_task);
 
-	/*
-	 * sync the PIDX values in HW and SW for LLD queues.
-	 */
-
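+	/*
+	 * Recovery sequence: clear the dropped-doorbell status, gate the LLD
+	 * doorbells, tell the ULDs to recover theirs, drain the FIFO, then
+	 * replay the missed pidx updates before re-opening the gates.
+	 */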
+	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
+	disable_dbs(adap);
 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+	drain_db_fifo(adap, 1);
+	recover_all_queues(adap);
+	enable_dbs(adap);
 }
 
 void t4_db_full(struct adapter *adap)
 {
-	schedule_work(&adap->db_full_task);
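+	/* mask the DBFIFO interrupts; process_db_full() re-enables them */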
+	t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
+			 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0);
+	queue_work(workq, &adap->db_full_task);
 }
 
 void t4_db_dropped(struct adapter *adap)
 {
-	schedule_work(&adap->db_drop_task);
+	queue_work(workq, &adap->db_drop_task);
 }
 
 static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -2552,6 +2682,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
 	lli.fw_vers = adap->params.fw_vers;
+	lli.dbfifo_int_thresh = dbfifo_int_thresh;
 
 	handle = ulds[uld].add(&lli);
 	if (IS_ERR(handle)) {
@@ -3668,6 +3799,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 	adapter->pdev = pdev;
 	adapter->pdev_dev = &pdev->dev;
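+	/* FW mailbox for, e.g., the EQ-cache flush; same as the function here */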
+	adapter->mbox = func;
 	adapter->fn = func;
 	adapter->msg_enable = dflt_msg_enable;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -3865,6 +3997,10 @@ static int __init cxgb4_init_module(void)
 {
 	int ret;
 
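+	/* dedicated queue for tid-release and doorbell-recovery work */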
+	workq = create_singlethread_workqueue("cxgb4");
+	if (!workq)
+		return -ENOMEM;
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
 	if (!cxgb4_debugfs_root)
@@ -3880,6 +4016,8 @@ static void __exit cxgb4_cleanup_module(void)
 {
 	pci_unregister_driver(&cxgb4_driver);
 	debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
+	flush_workqueue(workq);
+	destroy_workqueue(workq);
 }
 
 module_init(cxgb4_init_module);