/*
 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
}

/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list and call back the client for
 *	each activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);

/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list and call back the client for
 *	each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 *	cxgb3_remove_clients - deactivate registered clients
 *	for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}
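
/*
 * Look up the adapter port that owns the given MAC address.  If a VLAN tag
 * is supplied, the matching VLAN device is returned instead; otherwise any
 * bonding master above the port is followed to the top-level device.
 */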
static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		const struct vlan_group *grp;
		struct net_device *dev = adapter->port[i];
		const struct port_info *p = netdev_priv(dev);

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				grp = p->vlan_grp;
				dev = grp ? grp->vlan_devices[vlan] : NULL;
			} else
				while (dev->master)
					dev = dev->master;
			return dev;
		}
	}
	return NULL;
}
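
/*
 * Service iSCSI ULP control requests: report the adapter's iSCSI DDP window
 * and maximum PDU sizes, or program the DDP tag mask.
 */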
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * whole pdu + cpl headers has to fit into one sge buffer.
		 */
		uiip->max_rxsz = min_t(unsigned int,
				       adapter->params.tp.rx_pg_size,
				       (adapter->sge.qs[0].fl[1].buf_size -
					sizeof(struct cpl_rx_data) * 2 -
					sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0
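
/*
 * Service RDMA control requests: report device parameters, operate on CQ
 * contexts, read adapter memory (MC7), and set up CQ and control-QP contexts.
 */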
static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *req = data;
		struct pci_dev *pdev = adapter->pdev;

		req->udbell_physbase = pci_resource_start(pdev, 2);
		req->udbell_len = pci_resource_len(pdev, 2);
		req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		req->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP: {
		unsigned long flags;
		struct rdma_cq_op *req = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
					req->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *req = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, req->id,
					  req->base_addr, req->size,
					  ASYNC_NOTIF_RSPQ,
					  req->ovfl_mode, req->credits,
					  req->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *req = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA,
					 ASYNC_NOTIF_RSPQ,
					 req->base_addr, req->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
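
/*
 * Main control entry point for offload clients: answer simple capability
 * queries directly and forward ULP iSCSI and RDMA requests to their
 * dedicated handlers once offload mode is running.
 */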
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is set up.  This complains and drops the packet as it
 * isn't normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
	       n, ntohl(*(__be32 *)skbs[0]->data));
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);
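
/*
 * Record the client and context for an established connection's hardware TID
 * and account for it in the in-use counter.
 */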
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
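
/*
 * Work handler that walks the deferred TID-release list and sends one
 * CPL_TID_RELEASE message to the hardware for each queued entry.
 */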
static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
		spin_unlock_bh(&td->tid_release_lock);
		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL | __GFP_NOFAIL);
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		spin_lock_bh(&td->tid_release_lock);
	}
	spin_unlock_bh(&td->tid_release_lock);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	td->tid_release_list = p;
	if (!p->ctx)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);
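
/*
 * Allocate an active-open TID from the free list and bind it to the given
 * client and context.  Returns the ATID, or -1 if none are available.
 */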
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);
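
/*
 * Allocate a server (listening) TID from the free list and bind it to the
 * given client and context.  Returns the STID, or -1 if none are available.
 */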
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);
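
/*
 * CPL handlers for SMT and L2T write replies: these only need to flag
 * unexpected hardware status before the caller frees the buffer.
 */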
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}
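
/*
 * Deliver an active-open reply to the client that owns the ATID carried in
 * the message.
 */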
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
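
/*
 * Dispatch a CPL message addressed to a server TID to the owning client's
 * handler for that opcode.
 */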
static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
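
/*
 * Dispatch a CPL message addressed to a hardware (connection) TID to the
 * owning client's handler for that opcode.
 */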
static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
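
/*
 * Hand an incoming passive-open connection request to the client listening
 * on the server TID encoded in the message.
 */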
static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
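
/*
 * Pass an abort request to the owning client; if no client claims the TID,
 * acknowledge the abort ourselves so the hardware can release the connection.
 */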
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;

		struct sk_buff *skb =
		    alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		skb->priority = CPL_PRIORITY_DATA;
		__skb_put(skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
		OPCODE_TID(rpl) =
		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
		rpl->cmd = req->status;
		cxgb3_ofld_send(dev, skb);
out:
		return CPL_RET_BUF_DONE;
	}
}
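
/*
 * Deliver an active-open establish notification to the client that owns the
 * ATID carried in the message.
 */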
static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SET_TCB_RPL status %u for tid %u\n",
		       rpl->status, GET_TID(rpl));
	return CPL_RET_BUF_DONE;
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = skb->data;
	netif_receive_skb(skb);
	return 0;
}
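
/*
 * Dispatch an RDMA TERMINATE message to the owning client, using the hardware
 * TID stashed in skb->priority and the opcode stashed in skb->csum.
 */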
static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
	unsigned int opcode = G_OPCODE(ntohl(skb->csum));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
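
/*
 * Netevent notifier callback: propagate neighbour updates and route
 * redirects from the core network stack into the offload state.
 */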
static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE): {
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_PMTU_UPDATE):
		break;
	case (NETEVENT_REDIRECT): {
		struct netevent_redirect *nr = ctx;

		cxgb_redirect(nr->old, nr->new);
		cxgb_neigh_update(nr->new->neighbour);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);

/*
 * T3CDEV's receive method.
 */
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = G_OPCODE(ntohl(skb->csum));
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = T3CDEV(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}
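
/*
 * Point a connection's TCB at a new L2T entry by sending a CPL_SET_TCB_FIELD
 * request that rewrites the L2T index field.
 */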
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}
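
/*
 * Handle a routing redirect: allocate an L2T entry for the new destination,
 * notify every client that holds a connection on the old route, and update
 * the affected TCBs to use the new L2T entry.
 */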
void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old->neighbour->dev;
	newdev = new->neighbour->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __FUNCTION__);
		return;
	}
	tdev = T3CDEV(olddev);
	BUG_ON(!tdev);
	if (tdev != T3CDEV(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __FUNCTION__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new->neighbour, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __FUNCTION__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	unsigned long p = (unsigned long)addr;

	if (p >= VMALLOC_START && p < VMALLOC_END)
		vfree(addr);
	else
		kfree(addr);
}

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}
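
/*
 * Bring up offload support for an adapter: query the device limits, allocate
 * the L2 table and TID tables, install the real receive and neighbour-update
 * methods, and register the netevent notifier for the first active adapter.
 */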
int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kcalloc(1, sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	kfree(t);
	return err;
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ? T3A : T3B;

	register_tdev(tdev);
}

void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}
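
/*
 * Install the default CPL handler for every opcode and then register the
 * specific handlers implemented above.
 */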
void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}