/*
 *  drivers/s390/net/qeth_l2_main.c
 *
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/list.h>

#include "qeth_core.h"

static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
			enum qeth_ipa_cmds,
			int (*reply_cb) (struct qeth_card *,
					struct qeth_reply*,
					unsigned long));
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);

static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return -EPERM;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((card->info.type == QETH_CARD_TYPE_OSD ||
		     card->info.type == QETH_CARD_TYPE_OSM ||
		     card->info.type == QETH_CARD_TYPE_OSX) &&
		    !card->info.guestlan)
			return 1;
		return 0;
		break;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
	return rc;
}

static int qeth_l2_verify_dev(struct net_device *dev)
{
	struct qeth_card *card;
	unsigned long flags;
	int rc = 0;

	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		if (card->dev == dev) {
			rc = QETH_REAL_CARD;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);

	return rc;
}

static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
{
	struct qeth_card *card;
	struct net_device *ndev;
	__u16 temp_dev_no;
	unsigned long flags;
	struct ccw_dev_id read_devid;

	ndev = NULL;
	memcpy(&temp_dev_no, read_dev_no, 2);
	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		ccw_device_get_id(CARD_RDEV(card), &read_devid);
		if (read_devid.devno == temp_dev_no) {
			ndev = card->dev;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
	return ndev;
}

static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_CARD_TEXT(card, 2, "L2Sgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	/* MAC already registered, needed in couple/uncouple case */
	if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
			mac, QETH_CARD_IFNAME(card));
		cmd->hdr.return_code = 0;
	}
	if (cmd->hdr.return_code)
		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
			mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}

static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_CARD_TEXT(card, 2, "L2Sgmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
					qeth_l2_send_setgroupmac_cb);
}

static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_CARD_TEXT(card, 2, "L2Dgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	if (cmd->hdr.return_code)
		QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
			mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}

static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_CARD_TEXT(card, 2, "L2Dgmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
					qeth_l2_send_delgroupmac_cb);
}

static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
{
	struct qeth_mc_mac *mc;
	int rc;

	mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
	if (!mc)
		return;

	memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
	mc->mc_addrlen = OSA_ADDR_LEN;
	mc->is_vmac = vmac;

	if (vmac) {
		rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					NULL);
	} else {
		rc = qeth_l2_send_setgroupmac(card, mac);
	}

	if (!rc)
		list_add_tail(&mc->list, &card->mc_list);
	else
		kfree(mc);
}

static void qeth_l2_del_all_mc(struct qeth_card *card)
{
	struct qeth_mc_mac *mc, *tmp;

	spin_lock_bh(&card->mclock);
	list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
		if (mc->is_vmac)
			qeth_l2_send_setdelmac(card, mc->mc_addr,
					IPA_CMD_DELVMAC, NULL);
		else
			qeth_l2_send_delgroupmac(card, mc->mc_addr);
		list_del(&mc->list);
		kfree(mc);
	}
	spin_unlock_bh(&card->mclock);
}

static inline int qeth_l2_get_cast_type(struct qeth_card *card,
			struct sk_buff *skb)
{
	if (card->info.type == QETH_CARD_TYPE_OSN)
		return RTN_UNSPEC;
	if (is_broadcast_ether_addr(skb->data))
		return RTN_BROADCAST;
	if (is_multicast_ether_addr(skb->data))
		return RTN_MULTICAST;
	return RTN_UNSPEC;
}
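
/* Build the layer-2 QDIO transport header for an outgoing frame: flag it as
 * unicast, multicast or broadcast and, if the frame carries an 802.1Q tag,
 * copy the VLAN ID into the header so the VSWITCH can evaluate it. */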
static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
			struct sk_buff *skb, int ipv, int cast_type)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);

	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;

	/* set byte byte 3 to casting flags */
	if (cast_type == RTN_MULTICAST)
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
	else if (cast_type == RTN_BROADCAST)
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
	else
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;

	hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
	/* VSWITCH relies on the VLAN
	 * information to be present in
	 * the QDIO header */
	if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
		hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
	}
}

static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "L2sdvcb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
			"Continuing\n", cmd->data.setdelvlan.vlan_id,
			QETH_CARD_IFNAME(card), cmd->hdr.return_code);
		QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
	}
	return 0;
}

static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
				enum qeth_ipa_cmds ipacmd)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setdelvlan.vlan_id = i;
	return qeth_send_ipa_cmd(card, iob,
				qeth_l2_send_setdelvlan_cb, NULL);
}

static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
{
	struct qeth_vlan_vid *id;

	QETH_CARD_TEXT(card, 3, "L2prcvln");
	spin_lock_bh(&card->vlanlock);
	list_for_each_entry(id, &card->vid_list, list) {
		if (clear)
			qeth_l2_send_setdelvlan(card, id->vid,
				IPA_CMD_DELVLAN);
		else
			qeth_l2_send_setdelvlan(card, id->vid,
				IPA_CMD_SETVLAN);
	}
	spin_unlock_bh(&card->vlanlock);
}

static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_vlan_vid *id;

	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
	if (!vid)
		return;
	if (card->info.type == QETH_CARD_TYPE_OSM) {
		QETH_CARD_TEXT(card, 3, "aidOSM");
		return;
	}
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_CARD_TEXT(card, 3, "aidREC");
		return;
	}
	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
	if (id) {
		id->vid = vid;
		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
		spin_lock_bh(&card->vlanlock);
		list_add_tail(&id->list, &card->vid_list);
		spin_unlock_bh(&card->vlanlock);
	}
}

static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_vlan_vid *id, *tmpid = NULL;
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
	if (card->info.type == QETH_CARD_TYPE_OSM) {
		QETH_CARD_TEXT(card, 3, "kidOSM");
		return;
	}
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_CARD_TEXT(card, 3, "kidREC");
		return;
	}
	spin_lock_bh(&card->vlanlock);
	list_for_each_entry(id, &card->vid_list, list) {
		if (id->vid == vid) {
			list_del(&id->list);
			tmpid = id;
			break;
		}
	}
	spin_unlock_bh(&card->vlanlock);
	if (tmpid) {
		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
		kfree(tmpid);
	}
	qeth_l2_set_multicast_list(card->dev);
}
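
/* Tear the card down step by step: close the net_device, deregister the MAC,
 * drop registered VLANs and multicast addresses, clear the QDIO buffers and
 * command queues, walking the card state from UP back to DOWN. */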
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(SETUP , 2, "stopcard");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, 0, 1);
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_l2_stop(card->dev);
		} else {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
		if (!card->use_hard_stop ||
		    recovery_mode) {
			__u8 *mac = &card->dev->dev_addr[0];
			rc = qeth_l2_send_delmac(card, mac);
			QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
		qeth_l2_process_vlans(card, 1);
		if (!card->use_hard_stop ||
		    recovery_mode)
			qeth_l2_del_all_mc(card);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}
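
/* Pull received frames out of the current inbound QDIO buffer and hand
 * layer-2 frames to the stack (OSN frames go to the registered data
 * callback); returns the number of frames processed within the budget. */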
static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
				int budget, int *done)
{
	int work_done = 0;
	struct sk_buff *skb;
	struct qeth_hdr *hdr;
	unsigned int len;

	*done = 0;
	BUG_ON(!budget);
	while (budget) {
		skb = qeth_core_get_next_skb(card,
			card->qdio.in_q->bufs[card->rx.b_index].buffer,
			&card->rx.b_element, &card->rx.e_offset, &hdr);
		if (!skb) {
			*done = 1;
			break;
		}
		skb->dev = card->dev;
		switch (hdr->hdr.l2.id) {
		case QETH_HEADER_TYPE_LAYER2:
			skb->pkt_type = PACKET_HOST;
			skb->protocol = eth_type_trans(skb, skb->dev);
			if (card->options.checksum_type == NO_CHECKSUMMING)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
			if (skb->protocol == htons(ETH_P_802_2))
				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
			len = skb->len;
			netif_receive_skb(skb);
			break;
		case QETH_HEADER_TYPE_OSN:
			if (card->info.type == QETH_CARD_TYPE_OSN) {
				skb_push(skb, sizeof(struct qeth_hdr));
				skb_copy_to_linear_data(skb, hdr,
						sizeof(struct qeth_hdr));
				len = skb->len;
				card->osn_info.data_cb(skb);
				break;
			}
			/* else unknown */
		default:
			dev_kfree_skb_any(skb);
			QETH_CARD_TEXT(card, 3, "inbunkno");
			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
			continue;
		}
		work_done++;
		budget--;
		card->stats.rx_packets++;
		card->stats.rx_bytes += len;
	}
	return work_done;
}
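
/* NAPI poll routine: fetch completed inbound buffers from QDIO, process the
 * frames they contain, recycle the buffers, and re-enable the QDIO interrupt
 * once the device runs dry or the budget is exhausted. */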
static int qeth_l2_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	int work_done = 0;
	struct qeth_qdio_buffer *buffer;
	int done;
	int new_budget = budget;

	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}

	while (1) {
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_get_next_buffers(
				card->data.ccwdev, 0, &card->rx.b_index,
				&card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
			card->rx.b_element =
				&card->qdio.in_q->bufs[card->rx.b_index]
				.buffer->element[0];
			card->rx.e_offset = 0;
		}

		while (card->rx.b_count) {
			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
			if (!(card->rx.qdio_err &&
			    qeth_check_qdio_errors(card, buffer->buffer,
			    card->rx.qdio_err, "qinerr")))
				work_done += qeth_l2_process_inbound_buffer(
					card, new_budget, &done);
			else
				done = 1;

			if (done) {
				if (card->options.performance_stats)
					card->perf_stats.bufs_rec++;
				qeth_put_buffer_pool_entry(card,
					buffer->pool_entry);
				qeth_queue_input_buffer(card, card->rx.b_index);
				card->rx.b_count--;
				if (card->rx.b_count) {
					card->rx.b_index =
						(card->rx.b_index + 1) %
						QDIO_MAX_BUFFERS_PER_Q;
					card->rx.b_element =
						&card->qdio.in_q
						->bufs[card->rx.b_index]
						.buffer->element[0];
					card->rx.e_offset = 0;
				}
			}

			if (work_done >= budget)
				goto out;
			else
				new_budget = budget - work_done;
		}
	}

	napi_complete(napi);
	if (qdio_start_irq(card->data.ccwdev, 0))
		napi_schedule(&card->napi);
out:
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
	return work_done;
}

static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
			enum qeth_ipa_cmds ipacmd,
			int (*reply_cb) (struct qeth_card *,
					struct qeth_reply*,
					unsigned long))
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "L2sdmac");
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
}

static int qeth_l2_send_setmac_cb(struct qeth_card *card,
			struct qeth_reply *reply,
			unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "L2Smaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
		switch (cmd->hdr.return_code) {
		case IPA_RC_L2_DUP_MAC:
		case IPA_RC_L2_DUP_LAYER3_MAC:
			dev_warn(&card->gdev->dev,
				"MAC address %pM already exists\n",
				card->dev->dev_addr);
			break;
		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
			dev_warn(&card->gdev->dev,
				"MAC address %pM is not authorized\n",
				card->dev->dev_addr);
			break;
		default:
			break;
		}
		cmd->hdr.return_code = -EIO;
	} else {
		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
			OSA_ADDR_LEN);
		dev_info(&card->gdev->dev,
			"MAC address %pM successfully registered on device %s\n",
			card->dev->dev_addr, card->dev->name);
	}
	return 0;
}

static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
	QETH_CARD_TEXT(card, 2, "L2Setmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					qeth_l2_send_setmac_cb);
}

static int qeth_l2_send_delmac_cb(struct qeth_card *card,
			struct qeth_reply *reply,
			unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "L2Dmaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
		cmd->hdr.return_code = -EIO;
		return 0;
	}
	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;

	return 0;
}

static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
	QETH_CARD_TEXT(card, 2, "L2Delmac");
	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
		return 0;
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
					qeth_l2_send_delmac_cb);
}

static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
	int rc = 0;
	char vendor_pre[] = {0x02, 0x00, 0x00};

	QETH_DBF_TEXT(SETUP, 2, "doL2init");
	QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));

	rc = qeth_query_setadapterparms(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
			"device %s: x%x\n", CARD_BUS_ID(card), rc);
	}

	if (card->info.type == QETH_CARD_TYPE_IQD ||
	    card->info.type == QETH_CARD_TYPE_OSM ||
	    card->info.type == QETH_CARD_TYPE_OSX ||
	    card->info.guestlan) {
		rc = qeth_setadpparms_change_macaddr(card);
		if (rc) {
			QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
				"device %s: x%x\n", CARD_BUS_ID(card), rc);
			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
			return rc;
		}
		QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
	} else {
		random_ether_addr(card->dev->dev_addr);
		memcpy(card->dev->dev_addr, vendor_pre, 3);
	}
	return 0;
}

static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "setmac");

	if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
		QETH_CARD_TEXT(card, 3, "setmcINV");
		return -EOPNOTSUPP;
	}

	if (card->info.type == QETH_CARD_TYPE_OSN ||
	    card->info.type == QETH_CARD_TYPE_OSM ||
	    card->info.type == QETH_CARD_TYPE_OSX) {
		QETH_CARD_TEXT(card, 3, "setmcTYP");
		return -EOPNOTSUPP;
	}
	QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_CARD_TEXT(card, 3, "setmcREC");
		return -ERESTARTSYS;
	}
	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
	if (!rc)
		rc = qeth_l2_send_setmac(card, addr->sa_data);
	return rc;
}

static void qeth_l2_set_multicast_list(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct netdev_hw_addr *ha;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return ;
	QETH_CARD_TEXT(card, 3, "setmulti");
	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
	    (card->state != CARD_STATE_UP))
		return;
	qeth_l2_del_all_mc(card);
	spin_lock_bh(&card->mclock);
	netdev_for_each_mc_addr(ha, dev)
		qeth_l2_add_mc(card, ha->addr, 0);

	netdev_for_each_uc_addr(ha, dev)
		qeth_l2_add_mc(card, ha->addr, 1);

	spin_unlock_bh(&card->mclock);
	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
		return;
	qeth_setadp_promisc_mode(card);
}
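
/* Transmit path: pick the outbound queue by priority, build the qeth header
 * (inline for OSN frames, in a separate cache object for IQD, in freshly
 * allocated headroom otherwise) and hand the frame to the QDIO send routines. */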
static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct qeth_hdr *hdr = NULL;
	int elements = 0;
	struct qeth_card *card = dev->ml_priv;
	struct sk_buff *new_skb = skb;
	int ipv = qeth_get_ip_version(skb);
	int cast_type = qeth_l2_get_cast_type(card, skb);
	struct qeth_qdio_out_q *queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
	int tx_bytes = skb->len;
	int data_offset = -1;
	int elements_needed = 0;
	int hd_len = 0;

	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
		card->stats.tx_carrier_errors++;
		goto tx_drop;
	}

	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
	    (skb->protocol == htons(ETH_P_IPV6)))
		goto tx_drop;

	if (card->options.performance_stats) {
		card->perf_stats.outbound_cnt++;
		card->perf_stats.outbound_start_time = qeth_get_micros();
	}
	netif_stop_queue(dev);

	if (card->info.type == QETH_CARD_TYPE_OSN)
		hdr = (struct qeth_hdr *)skb->data;
	else {
		if (card->info.type == QETH_CARD_TYPE_IQD) {
			new_skb = skb;
			data_offset = ETH_HLEN;
			hd_len = ETH_HLEN;
			hdr = kmem_cache_alloc(qeth_core_header_cache,
						GFP_ATOMIC);
			if (!hdr)
				goto tx_drop;
			elements_needed++;
			skb_reset_mac_header(new_skb);
			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
			hdr->hdr.l2.pkt_length = new_skb->len;
			memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
				skb_mac_header(new_skb), ETH_HLEN);
		} else {
			/* create a clone with writeable headroom */
			new_skb = skb_realloc_headroom(skb,
						sizeof(struct qeth_hdr));
			if (!new_skb)
				goto tx_drop;
			hdr = (struct qeth_hdr *)skb_push(new_skb,
						sizeof(struct qeth_hdr));
			skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
		}
	}

	elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
						elements_needed);
	if (!elements) {
		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);
		goto tx_drop;
	}

	if (card->info.type != QETH_CARD_TYPE_IQD) {
		if (qeth_hdr_chk_and_bounce(new_skb,
		    sizeof(struct qeth_hdr_layer2)))
			goto tx_drop;
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					elements);
	} else
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					elements, data_offset, hd_len);

	if (!rc) {
		card->stats.tx_packets++;
		card->stats.tx_bytes += tx_bytes;
		if (new_skb != skb)
			dev_kfree_skb_any(skb);
		rc = NETDEV_TX_OK;
	} else {
		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);

		if (rc == -EBUSY) {
			if (new_skb != skb)
				dev_kfree_skb_any(new_skb);
			return NETDEV_TX_BUSY;
		} else
			goto tx_drop;
	}

	netif_wake_queue(dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_time += qeth_get_micros() -
			card->perf_stats.outbound_start_time;
	return rc;

tx_drop:
	card->stats.tx_dropped++;
	card->stats.tx_errors++;
	if ((new_skb != skb) && new_skb)
		dev_kfree_skb_any(new_skb);
	dev_kfree_skb_any(skb);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}

static int qeth_l2_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "qethopen");
	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
	    (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
		QETH_CARD_TEXT(card, 4, "nomacadr");
		return -EPERM;
	}
	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;
	netif_start_queue(dev);

	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
		napi_enable(&card->napi);
		napi_schedule(&card->napi);
	} else
		rc = -EIO;
	return rc;
}

static int qeth_l2_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	netif_tx_disable(dev);
	if (card->state == CARD_STATE_UP) {
		card->state = CARD_STATE_SOFTSETUP;
		napi_disable(&card->napi);
	}
	return 0;
}

static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	INIT_LIST_HEAD(&card->vid_list);
	INIT_LIST_HEAD(&card->mc_list);
	card->options.layer2 = 1;
	card->discipline.start_poll = qeth_qdio_start_poll;
	card->discipline.input_handler = (qdio_handler_t *)
		qeth_qdio_input_handler;
	card->discipline.output_handler = (qdio_handler_t *)
		qeth_qdio_output_handler;
	card->discipline.recover = qeth_l2_recover;
	return 0;
}

static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE) {
		card->use_hard_stop = 1;
		qeth_l2_set_offline(cgdev);
	}

	if (card->dev) {
		unregister_netdev(card->dev);
		card->dev = NULL;
	}
	return;
}

static const struct ethtool_ops qeth_l2_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_sset_count = qeth_core_get_sset_count,
	.get_drvinfo = qeth_core_get_drvinfo,
	.get_settings = qeth_core_ethtool_get_settings,
};

static const struct ethtool_ops qeth_l2_osn_ops = {
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_sset_count = qeth_core_get_sset_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};

static const struct net_device_ops qeth_l2_netdev_ops = {
	.ndo_open = qeth_l2_open,
	.ndo_stop = qeth_l2_stop,
	.ndo_get_stats = qeth_get_stats,
	.ndo_start_xmit = qeth_l2_hard_start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = qeth_l2_set_multicast_list,
	.ndo_do_ioctl = qeth_l2_do_ioctl,
	.ndo_set_mac_address = qeth_l2_set_mac_address,
	.ndo_change_mtu = qeth_change_mtu,
	.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
	.ndo_tx_timeout = qeth_tx_timeout,
};

static int qeth_l2_setup_netdev(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
		break;
	case QETH_CARD_TYPE_OSN:
		card->dev = alloc_netdev(0, "osn%d", ether_setup);
		card->dev->flags |= IFF_NOARP;
		break;
	default:
		card->dev = alloc_etherdev(0);
	}

	if (!card->dev)
		return -ENODEV;

	card->dev->ml_priv = card;
	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
	card->dev->mtu = card->info.initial_mtu;
	card->dev->netdev_ops = &qeth_l2_netdev_ops;
	if (card->info.type != QETH_CARD_TYPE_OSN)
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
	else
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
	card->dev->features |= NETIF_F_HW_VLAN_FILTER;
	card->info.broadcast_capable = 1;
	qeth_l2_request_initial_mac(card);
	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
	return register_netdev(card->dev);
}
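
/* Bring the card online: perform the hard setup, register the net_device and
 * MAC address, start the LAN, set up VLANs and the QDIO queues, and reopen
 * the interface when this is a recovery run. */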
static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_card_states recover_flag;

	BUG_ON(!card);
	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_DBF_TEXT(SETUP, 2, "setonlin");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	recover_flag = card->state;
	rc = qeth_core_hardsetup_card(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		rc = -ENODEV;
		goto out_remove;
	}

	if (!card->dev && qeth_l2_setup_netdev(card)) {
		rc = -ENODEV;
		goto out_remove;
	}

	if (card->info.type != QETH_CARD_TYPE_OSN)
		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);

	card->state = CARD_STATE_HARDSETUP;
	memset(&card->rx, 0, sizeof(struct qeth_rx));
	qeth_print_status_message(card);

	/* softsetup */
	QETH_DBF_TEXT(SETUP, 2, "softsetp");

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		if (rc == 0xe080) {
			dev_warn(&card->gdev->dev,
				"The LAN is offline\n");
			card->lan_online = 0;
			goto contin;
		}
		rc = -ENODEV;
		goto out_remove;
	} else
		card->lan_online = 1;

contin:
	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
	    (card->info.type == QETH_CARD_TYPE_OSX))
		/* configure isolation level */
		qeth_set_access_ctrl_online(card);

	if (card->info.type != QETH_CARD_TYPE_OSN &&
	    card->info.type != QETH_CARD_TYPE_OSM)
		qeth_l2_process_vlans(card, 0);

	netif_tx_disable(card->dev);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
		rc = -ENODEV;
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;
	if (card->lan_online)
		netif_carrier_on(card->dev);
	else
		netif_carrier_off(card->dev);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (recover_flag == CARD_STATE_RECOVER) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_l2_open(card->dev);
		} else {
			rtnl_lock();
			dev_open(card->dev);
			rtnl_unlock();
		}
		/* this also sets saved unicast addresses */
		qeth_l2_set_multicast_list(card->dev);
	}
	/* let user_space know that device is online */
	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;

out_remove:
	card->use_hard_stop = 1;
	qeth_l2_stop_card(card, 0);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	if (recover_flag == CARD_STATE_RECOVER)
		card->state = CARD_STATE_RECOVER;
	else
		card->state = CARD_STATE_DOWN;
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int qeth_l2_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_l2_set_online(gdev, 0);
}

static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
					int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;
	enum qeth_card_states recover_flag;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_DBF_TEXT(SETUP, 3, "setoffl");
	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
		netif_carrier_off(card->dev);
	recover_flag = card->state;
	qeth_l2_stop_card(card, recovery_mode);
	rc = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	/* let user_space know that device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
}

static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l2_set_offline(cgdev, 0);
}
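
/* Recovery thread: take the card offline with a hard stop and bring it back
 * online in recovery mode, closing the interface if the restart fails. */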
static int qeth_l2_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		"A recovery process has been started for the device\n");
	card->use_hard_stop = 1;
	__qeth_l2_set_offline(card->gdev, 1);
	rc = __qeth_l2_set_online(card->gdev, 1);
	if (!rc)
		dev_info(&card->gdev->dev,
			"Device successfully recovered!\n");
	else {
		rtnl_lock();
		dev_close(card->dev);
		rtnl_unlock();
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

static int __init qeth_l2_init(void)
{
	pr_info("register layer 2 discipline\n");
	return 0;
}

static void __exit qeth_l2_exit(void)
{
	pr_info("unregister layer 2 discipline\n");
}

static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
}

static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	if (card->dev)
		netif_device_detach(card->dev);
	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;
	if (card->state == CARD_STATE_UP) {
		card->use_hard_stop = 1;
		__qeth_l2_set_offline(card->gdev, 1);
	} else
		__qeth_l2_set_offline(card->gdev, 0);
	return 0;
}

static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;

	if (gdev->state == CCWGROUP_OFFLINE)
		goto out;

	if (card->state == CARD_STATE_RECOVER) {
		rc = __qeth_l2_set_online(card->gdev, 1);
		if (rc) {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
	} else
		rc = __qeth_l2_set_online(card->gdev, 0);
out:
	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (card->dev)
		netif_device_attach(card->dev);
	if (rc)
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
	return rc;
}

struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
	.probe = qeth_l2_probe_device,
	.remove = qeth_l2_remove_device,
	.set_online = qeth_l2_set_online,
	.set_offline = qeth_l2_set_offline,
	.shutdown = qeth_l2_shutdown,
	.freeze = qeth_l2_pm_suspend,
	.thaw = qeth_l2_pm_resume,
	.restore = qeth_l2_pm_resume,
};
EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);

static int qeth_osn_send_control_data(struct qeth_card *card, int len,
			struct qeth_cmd_buffer *iob)
{
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 5, "osndctrd");

	wait_event(card->wait_q,
		atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
	qeth_prepare_control_data(card, len, iob);
	QETH_CARD_TEXT(card, 6, "osnoirqp");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			(addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
			"ccw_device_start rc = %i\n", rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
	}
	return rc;
}

static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
			struct qeth_cmd_buffer *iob, int data_len)
{
	u16 s1, s2;

	QETH_CARD_TEXT(card, 4, "osndipa");

	qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
	s2 = (u16)data_len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_osn_send_control_data(card, s1, iob);
}
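
/* OSN callback interface: copy the caller's data into a command buffer and
 * send it as an IPA command on the write channel of the OSN device. */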
int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_card *card;
	int rc;

	if (!dev)
		return -ENODEV;
	card = dev->ml_priv;
	if (!card)
		return -ENODEV;
	QETH_CARD_TEXT(card, 2, "osnsdmc");
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;
	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
	return rc;
}
EXPORT_SYMBOL(qeth_osn_assist);

int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
		int (*assist_cb)(struct net_device *, void *),
		int (*data_cb)(struct sk_buff *))
{
	struct qeth_card *card;

	*dev = qeth_l2_netdev_by_devno(read_dev_no);
	if (*dev == NULL)
		return -ENODEV;
	card = (*dev)->ml_priv;
	if (!card)
		return -ENODEV;
	QETH_CARD_TEXT(card, 2, "osnreg");
	if ((assist_cb == NULL) || (data_cb == NULL))
		return -EINVAL;
	card->osn_info.assist_cb = assist_cb;
	card->osn_info.data_cb = data_cb;
	return 0;
}
EXPORT_SYMBOL(qeth_osn_register);

void qeth_osn_deregister(struct net_device *dev)
{
	struct qeth_card *card;

	if (!dev)
		return;
	card = dev->ml_priv;
	if (!card)
		return;
	QETH_CARD_TEXT(card, 2, "osndereg");
	card->osn_info.assist_cb = NULL;
	card->osn_info.data_cb = NULL;
	return;
}
EXPORT_SYMBOL(qeth_osn_deregister);

module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 2 discipline");
MODULE_LICENSE("GPL");