@@ -73,14 +73,8 @@ struct fc_exch_pool {
 struct fc_exch_mgr {
 	enum fc_class	class;		/* default class for sequences */
 	struct kref	kref;		/* exchange mgr reference count */
-	spinlock_t	em_lock;	/* exchange manager lock,
-					   must be taken before ex_lock */
-	u16		next_xid;	/* next possible free exchange ID */
 	u16		min_xid;	/* min exchange ID */
 	u16		max_xid;	/* max exchange ID */
-	u16		max_read;	/* max exchange ID for read */
-	u16		last_read;	/* last xid allocated for read */
-	u32	total_exches;		/* total allocated exchanges */
 	struct list_head	ex_list;	/* allocated exchanges list */
 	mempool_t	*ep_pool;	/* reserve ep's */
 	u16		pool_max_index;	/* max exch array index in exch pool */
@@ -99,7 +93,6 @@ struct fc_exch_mgr {
 		atomic_t seq_not_found;
 		atomic_t non_bls_resp;
 	} stats;
-	struct fc_exch **exches;	/* for exch pointers indexed by xid */
 };
 
 #define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
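The per-cpu pool type named in the first hunk header, struct fc_exch_pool, is defined just above these hunks and is not quoted here. A minimal sketch of it, reconstructed from the fields the hunks below read and write (field order and comments are assumptions, not quoted code):

struct fc_exch_pool {
	u16		next_index;	/* next possible free exchange index */
	u16		total_exches;	/* total allocated exchanges */
	spinlock_t	lock;		/* exch pool lock */
	struct list_head	ex_list;	/* allocated exchanges list */
	/* The array of exchange pointers is assumed to be laid out
	 * immediately after this struct, which is why fc_exch_ptr_set()
	 * below indexes from (pool + 1).
	 */
};

Each pool carries the lock, exchange list, and counter that previously lived once per exchange manager, so CPUs no longer contend on a single em_lock.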
@@ -192,8 +185,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
  * sequence allocation and deallocation must be locked.
  *  - exchange refcnt can be done atomicly without locks.
  *  - sequence allocation must be locked by exch lock.
- *  - If the em_lock and ex_lock must be taken at the same time, then the
- *    em_lock must be taken before the ex_lock.
+ *  - If the EM pool lock and ex_lock must be taken at the same time, then the
+ *    EM pool lock must be taken before the ex_lock.
  */
 
 /*
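A sketch of the nesting the rewritten comment prescribes; the function and the work done under the locks are illustrative, not part of the patch:

/* When both locks are needed, take the per-cpu pool lock first,
 * then the exchange's ex_lock; release in the reverse order.
 */
static void example_nested_locking(struct fc_exch_pool *pool,
				   struct fc_exch *ep)
{
	spin_lock_bh(&pool->lock);	/* outer: EM pool lock */
	spin_lock_bh(&ep->ex_lock);	/* inner: exchange lock */
	/* ... update pool bookkeeping and exchange state together ... */
	spin_unlock_bh(&ep->ex_lock);
	spin_unlock_bh(&pool->lock);
}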
@@ -335,17 +328,18 @@ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
 	((struct fc_exch **)(pool + 1))[index] = ep;
 }
 
-static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
+static void fc_exch_delete(struct fc_exch *ep)
 {
-	struct fc_exch_mgr *mp;
+	struct fc_exch_pool *pool;
 
-	mp = ep->em;
-	spin_lock_bh(&mp->em_lock);
-	WARN_ON(mp->total_exches <= 0);
-	mp->total_exches--;
-	mp->exches[ep->xid - mp->min_xid] = NULL;
+	pool = ep->pool;
+	spin_lock_bh(&pool->lock);
+	WARN_ON(pool->total_exches <= 0);
+	pool->total_exches--;
+	fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
+			NULL);
 	list_del(&ep->ex_list);
-	spin_unlock_bh(&mp->em_lock);
+	spin_unlock_bh(&pool->lock);
 	fc_exch_release(ep);	/* drop hold for exch in mp */
 }
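fc_exch_delete() clears the exchange's slot through fc_exch_ptr_set(), whose context line above shows the pointer array living directly behind the pool structure. The matching getter used by the hunks below is assumed to mirror it:

/* Assumed counterpart of fc_exch_ptr_set() shown above. */
static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
					      u16 index)
{
	struct fc_exch **exches = (struct fc_exch **)(pool + 1);

	return exches[index];
}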
@@ -465,7 +459,7 @@ static void fc_exch_timeout(struct work_struct *work)
 		rc = fc_exch_done_locked(ep);
 		spin_unlock_bh(&ep->ex_lock);
 		if (!rc)
-			fc_exch_mgr_delete_ep(ep);
+			fc_exch_delete(ep);
 		if (resp)
 			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
 		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
@@ -509,10 +503,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 					struct fc_exch_mgr *mp)
 {
 	struct fc_exch *ep;
-	u16 min, max, xid;
-
-	min = mp->min_xid;
-	max = mp->max_xid;
+	unsigned int cpu;
+	u16 index;
+	struct fc_exch_pool *pool;
 
 	/* allocate memory for exchange */
 	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
@@ -522,15 +515,17 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 	}
 	memset(ep, 0, sizeof(*ep));
 
-	spin_lock_bh(&mp->em_lock);
-	xid = mp->next_xid;
-	/* alloc a new xid */
-	while (mp->exches[xid - min]) {
-		xid = (xid == max) ? min : xid + 1;
-		if (xid == mp->next_xid)
+	cpu = smp_processor_id();
+	pool = per_cpu_ptr(mp->pool, cpu);
+	spin_lock_bh(&pool->lock);
+	index = pool->next_index;
+	/* allocate new exch from pool */
+	while (fc_exch_ptr_get(pool, index)) {
+		index = index == mp->pool_max_index ? 0 : index + 1;
+		if (index == pool->next_index)
 			goto err;
 	}
-	mp->next_xid = (xid == max) ? min : xid + 1;
+	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
 
 	fc_exch_hold(ep);	/* hold for exch in mp */
 	spin_lock_init(&ep->ex_lock);
@@ -541,17 +536,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 	 */
 	spin_lock_bh(&ep->ex_lock);
 
-	mp->exches[xid - mp->min_xid] = ep;
-	list_add_tail(&ep->ex_list, &mp->ex_list);
+	fc_exch_ptr_set(pool, index, ep);
+	list_add_tail(&ep->ex_list, &pool->ex_list);
 	fc_seq_alloc(ep, ep->seq_id++);
-	mp->total_exches++;
-	spin_unlock_bh(&mp->em_lock);
+	pool->total_exches++;
+	spin_unlock_bh(&pool->lock);
 
 	/*
 	 * update exchange
 	 */
-	ep->oxid = ep->xid = xid;
+	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
 	ep->em = mp;
+	ep->pool = pool;
 	ep->lp = lport;
 	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
 	ep->rxid = FC_XID_UNKNOWN;
@@ -560,7 +556,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 out:
 	return ep;
 err:
-	spin_unlock_bh(&mp->em_lock);
+	spin_unlock_bh(&pool->lock);
 	atomic_inc(&mp->stats.no_free_exch_xid);
 	mempool_free(ep, mp->ep_pool);
 	return NULL;
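The allocator encodes the owning CPU into the exchange ID: the low fc_cpu_order bits carry the CPU number, and the remaining bits the slot index within that CPU's pool. A sketch of the round trip, mirroring the arithmetic in fc_exch_em_alloc() above and fc_exch_find() below (the helper names are illustrative; it is assumed that fc_cpu_mask == (1 << fc_cpu_order) - 1 and that min_xid has zero low-order bits so the CPU bits survive the offset):

static inline u16 fc_xid_encode(struct fc_exch_mgr *mp, u16 index,
				unsigned int cpu)
{
	return (index << fc_cpu_order | cpu) + mp->min_xid;
}

static inline unsigned int fc_xid_to_cpu(u16 xid)
{
	return xid & fc_cpu_mask;	/* selects the owning cpu's pool */
}

static inline u16 fc_xid_to_index(struct fc_exch_mgr *mp, u16 xid)
{
	return (xid - mp->min_xid) >> fc_cpu_order;	/* slot within pool */
}

With this layout a response frame's xid leads straight to the right pool and slot without scanning, which is exactly what fc_exch_find() relies on.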
@@ -597,16 +593,18 @@ EXPORT_SYMBOL(fc_exch_alloc);
  */
 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 {
+	struct fc_exch_pool *pool;
 	struct fc_exch *ep = NULL;
 
 	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
-		spin_lock_bh(&mp->em_lock);
-		ep = mp->exches[xid - mp->min_xid];
+		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
+		spin_lock_bh(&pool->lock);
+		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
 		if (ep) {
 			fc_exch_hold(ep);
 			WARN_ON(ep->xid != xid);
 		}
-		spin_unlock_bh(&mp->em_lock);
+		spin_unlock_bh(&pool->lock);
 	}
 	return ep;
 }
@@ -620,7 +618,7 @@ void fc_exch_done(struct fc_seq *sp)
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 }
 EXPORT_SYMBOL(fc_exch_done);
@@ -1213,7 +1211,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	WARN_ON(fc_seq_exch(sp) != ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 }
 
 /*
@@ -1323,7 +1321,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 
 	if (resp)
 		resp(sp, fp, ex_resp_arg);
@@ -1466,48 +1464,76 @@ static void fc_exch_reset(struct fc_exch *ep)
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 
 	if (resp)
 		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
 }
 
-/*
- * Reset an exchange manager, releasing all sequences and exchanges.
- * If sid is non-zero, reset only exchanges we source from that FID.
- * If did is non-zero, reset only exchanges destined to that FID.
+/**
+ * fc_exch_pool_reset() - Resets a per cpu exchange pool
+ * @lport: ptr to the local port
+ * @pool: ptr to the per cpu exchange pool
+ * @sid: source FC ID
+ * @did: destination FC ID
+ *
+ * Resets a per cpu exchange pool, releasing all of its
+ * sequences and exchanges. If sid is non-zero, reset only
+ * exchanges we sourced from that FID. If did is non-zero,
+ * reset only exchanges destined to that FID.
  */
-void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
+static void fc_exch_pool_reset(struct fc_lport *lport,
+			       struct fc_exch_pool *pool,
+			       u32 sid, u32 did)
 {
 	struct fc_exch *ep;
 	struct fc_exch *next;
-	struct fc_exch_mgr *mp;
-	struct fc_exch_mgr_anchor *ema;
 
-	list_for_each_entry(ema, &lp->ema_list, ema_list) {
-		mp = ema->mp;
-		spin_lock_bh(&mp->em_lock);
+	spin_lock_bh(&pool->lock);
 restart:
-		list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
-			if ((lp == ep->lp) &&
-			    (sid == 0 || sid == ep->sid) &&
-			    (did == 0 || did == ep->did)) {
-				fc_exch_hold(ep);
-				spin_unlock_bh(&mp->em_lock);
-
-				fc_exch_reset(ep);
-
-				fc_exch_release(ep);
-				spin_lock_bh(&mp->em_lock);
-
-				/*
-				 * must restart loop incase while lock
-				 * was down multiple eps were released.
-				 */
-				goto restart;
-			}
+	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
+		if ((lport == ep->lp) &&
+		    (sid == 0 || sid == ep->sid) &&
+		    (did == 0 || did == ep->did)) {
+			fc_exch_hold(ep);
+			spin_unlock_bh(&pool->lock);
+
+			fc_exch_reset(ep);
+
+			fc_exch_release(ep);
+			spin_lock_bh(&pool->lock);
+
+			/*
+			 * must restart loop in case while lock
+			 * was down multiple eps were released.
+			 */
+			goto restart;
 		}
-		spin_unlock_bh(&mp->em_lock);
+	}
+	spin_unlock_bh(&pool->lock);
+}
+
+/**
+ * fc_exch_mgr_reset() - Resets all EMs of a lport
+ * @lport: ptr to the local port
+ * @sid: source FC ID
+ * @did: destination FC ID
+ *
+ * Resets all EMs of a lport, releasing all of their
+ * sequences and exchanges. If sid is non-zero, reset only
+ * exchanges we sourced from that FID. If did is non-zero,
+ * reset only exchanges destined to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
+{
+	struct fc_exch_mgr_anchor *ema;
+	unsigned int cpu;
+
+	list_for_each_entry(ema, &lport->ema_list, ema_list) {
+		for_each_possible_cpu(cpu)
+			fc_exch_pool_reset(lport,
+					   per_cpu_ptr(ema->mp->pool, cpu),
+					   sid, did);
 	}
 }
 EXPORT_SYMBOL(fc_exch_mgr_reset);
@@ -1777,11 +1803,6 @@ static void fc_exch_mgr_destroy(struct kref *kref)
 {
 	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
 
-	/*
-	 * The total exch count must be zero
-	 * before freeing exchange manager.
-	 */
-	WARN_ON(mp->total_exches != 0);
 	mempool_destroy(mp->ep_pool);
 	free_percpu(mp->pool);
 	kfree(mp);
@@ -1802,7 +1823,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 				      bool (*match)(struct fc_frame *))
 {
 	struct fc_exch_mgr *mp;
-	size_t len;
 	u16 pool_exch_range;
 	size_t pool_size;
 	unsigned int cpu;
@@ -1816,25 +1836,16 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 	}
 
 	/*
-	 * Memory need for EM
+	 * allocate memory for EM
 	 */
-	len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
-	len += sizeof(struct fc_exch_mgr);
-
-	mp = kzalloc(len, GFP_ATOMIC);
+	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
 	if (!mp)
 		return NULL;
 
 	mp->class = class;
-	mp->total_exches = 0;
-	mp->exches = (struct fc_exch **)(mp + 1);
 	/* adjust em exch xid range for offload */
 	mp->min_xid = min_xid;
 	mp->max_xid = max_xid;
-	mp->next_xid = min_xid;
-
-	INIT_LIST_HEAD(&mp->ex_list);
-	spin_lock_init(&mp->em_lock);
 
 	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
 	if (!mp->ep_pool)
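The per-cpu pools themselves are sized and allocated later in fc_exch_mgr_alloc(), outside the quoted hunks. A hedged sketch of that step, consistent with the pool_exch_range, pool_size, and pool_max_index variables above (the exact statements are assumptions, not quoted code):

/* Each pool covers an equal slice of the [min_xid, max_xid] range;
 * the exchange pointer array trails the pool struct itself.
 */
pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
pool_size = sizeof(struct fc_exch_pool) +
	    pool_exch_range * sizeof(struct fc_exch *);
mp->pool_max_index = pool_exch_range - 1;
mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));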
@@ -1944,7 +1955,7 @@ err:
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 	return NULL;
 }
 EXPORT_SYMBOL(fc_exch_seq_send);