cxgb3_offload.c

/*
 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Adds the client to the client list and calls it back for each
 *	offload device that is already activated.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);
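
/*
 * Illustrative sketch (not part of this driver): a minimal offload client.
 * The names example_add, example_remove and example_client are hypothetical;
 * a real client also fills in a CPL handlers table and per-connection state.
 *
 *	static void example_add(struct t3cdev *tdev)
 *	{
 *		printk(KERN_INFO "offload device %s activated\n", tdev->name);
 *	}
 *
 *	static void example_remove(struct t3cdev *tdev)
 *	{
 *		printk(KERN_INFO "offload device %s going away\n", tdev->name);
 *	}
 *
 *	static struct cxgb3_client example_client = {
 *		.add	= example_add,
 *		.remove	= example_remove,
 *	};
 *
 * The client calls cxgb3_register_client(&example_client) from its module
 * init and cxgb3_unregister_client(&example_client) from its module exit.
 */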

/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Removes the client from the client list and calls it back for each
 *	activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Calls back all registered clients once an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 *	cxgb3_remove_clients - deactivate registered clients
 *	for an offload device
 *	@tdev: the offload device
 *
 *	Calls back all registered clients once an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct vlan_group *grp;
		struct net_device *dev = adapter->port[i];
		const struct port_info *p = netdev_priv(dev);

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				grp = p->vlan_grp;
				dev = NULL;
				if (grp)
					dev = vlan_group_get_device(grp, vlan);
			} else
				while (dev->master)
					dev = dev->master;
			return dev;
		}
	}
	return NULL;
}

static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * whole pdu + cpl headers has to fit into one sge buffer.
		 */
		uiip->max_rxsz = min_t(unsigned int,
				       adapter->params.tp.rx_pg_size,
				       (adapter->sge.qs[0].fl[1].buf_size -
					sizeof(struct cpl_rx_data) * 2 -
					sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP: {
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, rdma->id,
					  rdma->base_addr, rdma->size,
					  ASYNC_NOTIF_RSPQ,
					  rdma->ovfl_mode, rdma->credits,
					  rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA,
					 ASYNC_NOTIF_RSPQ,
					 rdma->base_addr, rdma->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
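
/*
 * Illustrative sketch (hypothetical caller, not part of this file): clients
 * interrogate the device through the ctl hook rather than calling
 * cxgb_offload_ctl() directly.  For example, to learn the TID range:
 *
 *	struct tid_range tids;
 *
 *	if (tdev->ctl(tdev, GET_TID_RANGE, &tids) == 0)
 *		printk(KERN_INFO "%u TIDs starting at %u\n",
 *		       tids.num, tids.base);
 *
 * Note that the requests that need a running offload device (the
 * ULP_ISCSI_* and RDMA_* cases above) fail with -EAGAIN until offloading
 * is activated.
 */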

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup.  This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
	       n, ntohl(*(__be32 *)skbs[0]->data));
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL | __GFP_NOFAIL);
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		spin_lock_bh(&td->tid_release_lock);
	}
	spin_unlock_bh(&td->tid_release_lock);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);
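
/*
 * Note on the mechanism above: the pending-release list is threaded through
 * the TID entries themselves.  Each queued t3c_tid_entry stores the next list
 * element in its ctx field (unused once the client pointer is cleared), so no
 * allocation is needed at queueing time, which may run in atomic context.
 * t3_process_tid_release_list() then drains the chain from process context,
 * where the GFP_KERNEL skb allocations are allowed to sleep.
 */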

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);
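
/*
 * Illustrative sketch (hypothetical active-open flow, not part of this file)
 * of how the allocators above fit together; example_client, my_ctx and hwtid
 * are placeholder names:
 *
 *	int atid = cxgb3_alloc_atid(tdev, &example_client, my_ctx);
 *	if (atid < 0)
 *		return -ENOMEM;		table full, back off
 *	... send CPL_ACT_OPEN_REQ carrying atid ...
 *
 * When the connection is established and the hardware TID is known, the
 * client moves the context over and retires the atid:
 *
 *	cxgb3_insert_tid(tdev, &example_client, my_ctx, hwtid);
 *	cxgb3_free_atid(tdev, atid);
 *
 * and on teardown releases the hardware TID with
 * cxgb3_remove_tid(tdev, my_ctx, hwtid).
 */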

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk(KERN_ERR "%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);
		if (!reply_skb) {
			printk(KERN_ERR "do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk(KERN_ERR "%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
	unsigned int opcode = G_OPCODE(ntohl(skb->csum));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE): {
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_PMTU_UPDATE):
		break;
	case (NETEVENT_REDIRECT): {
		struct netevent_redirect *nr = ctx;

		cxgb_redirect(nr->old, nr->new);
		cxgb_neigh_update(nr->new->neighbour);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
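
/*
 * Illustrative sketch (hypothetical handler, not part of this file): a CPL
 * handler has the cpl_handler_func signature and returns CPL_RET_* flags;
 * returning CPL_RET_BUF_DONE tells process_rx() below to free the skb.
 *
 *	static int example_handler(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		union opcode_tid *p = cplhdr(skb);
 *
 *		printk(KERN_DEBUG "%s: CPL for TID %u\n",
 *		       dev->name, G_TID(ntohl(p->opcode_tid)));
 *		return CPL_RET_BUF_DONE;
 *	}
 *
 *	t3_register_cpl_handler(CPL_ACT_ESTABLISH, example_handler);
 *	t3_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);	restore default
 */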

/*
 * T3CDEV's receive method.
 */
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = G_OPCODE(ntohl(skb->csum));
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old->neighbour->dev;
	newdev = new->neighbour->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __FUNCTION__);
		return;
	}
	tdev = dev2t3cdev(olddev);
	BUG_ON(!tdev);
	if (tdev != dev2t3cdev(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __FUNCTION__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new->neighbour, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __FUNCTION__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	unsigned long p = (unsigned long)addr;

	if (p >= VMALLOC_START && p < VMALLOC_END)
		vfree(addr);
	else
		kfree(addr);
}
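
/*
 * Illustrative sketch (hypothetical use, not part of this file): the pair
 * above lets large tables be allocated without the caller caring which
 * allocator ended up satisfying the request; tab and ntab are placeholders.
 *
 *	struct t3c_tid_entry *tab = cxgb_alloc_mem(ntab * sizeof(*tab));
 *	if (!tab)
 *		return -ENOMEM;
 *	...
 *	cxgb_free_mem(tab);	works for both kmalloc and vmalloc memory
 */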

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}
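
/*
 * Note on the layout built above: a single allocation holds all three maps,
 *
 *	tid_tab[0..ntids-1] | stid_tab[0..nstids-1] | atid_tab[0..natids-1]
 *
 * and the stid/atid free lists are threaded through the unused entries via
 * their next pointers, so the allocators earlier in this file hand out and
 * reclaim entries with O(1) list operations under the respective spinlock.
 */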

static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kcalloc(1, sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	kfree(t);
	return err;
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ? T3A : T3B;

	register_tdev(tdev);
}

void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}