/*
 *  drivers/s390/net/qeth_l2_main.c
 *
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/list.h>

#include "qeth_core.h"

static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
			   enum qeth_ipa_cmds,
			   int (*reply_cb) (struct qeth_card *,
					    struct qeth_reply*,
					    unsigned long));
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);
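/* Handle the private SIOC_QETH_* ioctls and basic MII queries for a
 * layer-2 interface; rejected while the card is not at least in
 * SOFTSETUP state, and not supported on OSN devices. */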
static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	if ((card->state != CARD_STATE_UP) &&
		(card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return -EPERM;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
		    !card->info.guestlan)
			return 1;
		return 0;
		break;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
	return rc;
}

static int qeth_l2_verify_dev(struct net_device *dev)
{
	struct qeth_card *card;
	unsigned long flags;
	int rc = 0;

	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		if (card->dev == dev) {
			rc = QETH_REAL_CARD;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);

	return rc;
}
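/* Look up the net_device whose read channel has the given device number;
 * used by the OSN registration path below. */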
static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
{
	struct qeth_card *card;
	struct net_device *ndev;
	__u16 temp_dev_no;
	unsigned long flags;
	struct ccw_dev_id read_devid;

	ndev = NULL;
	memcpy(&temp_dev_no, read_dev_no, 2);
	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		ccw_device_get_id(CARD_RDEV(card), &read_devid);
		if (read_devid.devno == temp_dev_no) {
			ndev = card->dev;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
	return ndev;
}

static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	/* MAC already registered, needed in couple/uncouple case */
	if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
			  mac, QETH_CARD_IFNAME(card));
		cmd->hdr.return_code = 0;
	}
	if (cmd->hdr.return_code)
		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}

static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Sgmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
					  qeth_l2_send_setgroupmac_cb);
}

static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	if (cmd->hdr.return_code)
		QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}

static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Dgmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
					  qeth_l2_send_delgroupmac_cb);
}
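/* Register a multicast (group) or unicast (VMAC) address with the adapter
 * and, on success, remember it in the card's mc_list for later removal. */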
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
{
	struct qeth_mc_mac *mc;
	int rc;

	mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
	if (!mc)
		return;

	memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
	mc->mc_addrlen = OSA_ADDR_LEN;
	mc->is_vmac = vmac;

	if (vmac) {
		rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					NULL);
	} else {
		rc = qeth_l2_send_setgroupmac(card, mac);
	}

	if (!rc)
		list_add_tail(&mc->list, &card->mc_list);
	else
		kfree(mc);
}

static void qeth_l2_del_all_mc(struct qeth_card *card)
{
	struct qeth_mc_mac *mc, *tmp;

	spin_lock_bh(&card->mclock);
	list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
		if (mc->is_vmac)
			qeth_l2_send_setdelmac(card, mc->mc_addr,
					IPA_CMD_DELVMAC, NULL);
		else
			qeth_l2_send_delgroupmac(card, mc->mc_addr);
		list_del(&mc->list);
		kfree(mc);
	}
	spin_unlock_bh(&card->mclock);
}

static void qeth_l2_get_packet_type(struct qeth_card *card,
			struct qeth_hdr *hdr, struct sk_buff *skb)
{
	__u16 hdr_mac;

	if (!memcmp(skb->data + QETH_HEADER_SIZE,
		    skb->dev->broadcast, 6)) {
		/* broadcast? */
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
		return;
	}
	hdr_mac = *((__u16 *)skb->data);
	/* tr multicast? */
	switch (card->info.link_type) {
	case QETH_LINK_TYPE_HSTR:
	case QETH_LINK_TYPE_LANE_TR:
		if ((hdr_mac == QETH_TR_MAC_NC) ||
		    (hdr_mac == QETH_TR_MAC_C))
			hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
		else
			hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
		break;
		/* eth or so multicast? */
	default:
		if ((hdr_mac == QETH_ETH_MAC_V4) ||
		    (hdr_mac == QETH_ETH_MAC_V6))
			hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
		else
			hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
	}
}
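/* Build the layer-2 qeth transport header: cast-type flags, packet length
 * and, if present, the VLAN tag taken from the Ethernet header. */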
static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
			struct sk_buff *skb, int ipv, int cast_type)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);

	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;

	/* set byte byte 3 to casting flags */
	if (cast_type == RTN_MULTICAST)
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
	else if (cast_type == RTN_BROADCAST)
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
	else
		qeth_l2_get_packet_type(card, hdr, skb);

	hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
	/* VSWITCH relies on the VLAN
	 * information to be present in
	 * the QDIO header */
	if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
		hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
	}
}

static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "L2sdvcb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
			  "Continuing\n", cmd->data.setdelvlan.vlan_id,
			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
		QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command);
		QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card));
		QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
	}
	return 0;
}

static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
				enum qeth_ipa_cmds ipacmd)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd);
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setdelvlan.vlan_id = i;
	return qeth_send_ipa_cmd(card, iob,
				 qeth_l2_send_setdelvlan_cb, NULL);
}

static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
{
	struct qeth_vlan_vid *id;

	QETH_DBF_TEXT(TRACE, 3, "L2prcvln");
	spin_lock_bh(&card->vlanlock);
	list_for_each_entry(id, &card->vid_list, list) {
		if (clear)
			qeth_l2_send_setdelvlan(card, id->vid,
				IPA_CMD_DELVLAN);
		else
			qeth_l2_send_setdelvlan(card, id->vid,
				IPA_CMD_SETVLAN);
	}
	spin_unlock_bh(&card->vlanlock);
}

static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_vlan_vid *id;

	QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_DBF_TEXT(TRACE, 3, "aidREC");
		return;
	}
	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
	if (id) {
		id->vid = vid;
		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
		spin_lock_bh(&card->vlanlock);
		list_add_tail(&id->list, &card->vid_list);
		spin_unlock_bh(&card->vlanlock);
	}
}

static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_vlan_vid *id, *tmpid = NULL;
	struct qeth_card *card = dev->ml_priv;

	QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_DBF_TEXT(TRACE, 3, "kidREC");
		return;
	}
	spin_lock_bh(&card->vlanlock);
	list_for_each_entry(id, &card->vid_list, list) {
		if (id->vid == vid) {
			list_del(&id->list);
			tmpid = id;
			break;
		}
	}
	spin_unlock_bh(&card->vlanlock);
	if (tmpid) {
		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
		kfree(tmpid);
	}
	qeth_l2_set_multicast_list(card->dev);
}
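/* Step the card down from UP through SOFTSETUP and HARDSETUP to DOWN,
 * deregistering MAC/VLAN state and clearing QDIO and command buffers
 * along the way. */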
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(SETUP , 2, "stopcard");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, 0, 1);
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_l2_stop(card->dev);
		} else {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
		if (!card->use_hard_stop ||
		    recovery_mode) {
			__u8 *mac = &card->dev->dev_addr[0];
			rc = qeth_l2_send_delmac(card, mac);
			QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
		qeth_l2_process_vlans(card, 1);
		if (!card->use_hard_stop ||
		    recovery_mode)
			qeth_l2_del_all_mc(card);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}
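/* Walk one inbound QDIO buffer, turn its elements into skbs and hand
 * layer-2 frames to the stack (or OSN frames to the registered data_cb). */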
static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
			    struct qeth_qdio_buffer *buf, int index)
{
	struct qdio_buffer_element *element;
	struct sk_buff *skb;
	struct qeth_hdr *hdr;
	int offset;
	unsigned int len;

	/* get first element of current buffer */
	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
	offset = 0;
	if (card->options.performance_stats)
		card->perf_stats.bufs_rec++;
	while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
				       &offset, &hdr))) {
		skb->dev = card->dev;
		/* is device UP ? */
		if (!(card->dev->flags & IFF_UP)) {
			dev_kfree_skb_any(skb);
			continue;
		}

		switch (hdr->hdr.l2.id) {
		case QETH_HEADER_TYPE_LAYER2:
			skb->pkt_type = PACKET_HOST;
			skb->protocol = eth_type_trans(skb, skb->dev);
			if (card->options.checksum_type == NO_CHECKSUMMING)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
			if (skb->protocol == htons(ETH_P_802_2))
				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
			len = skb->len;
			netif_rx(skb);
			break;
		case QETH_HEADER_TYPE_OSN:
			if (card->info.type == QETH_CARD_TYPE_OSN) {
				skb_push(skb, sizeof(struct qeth_hdr));
				skb_copy_to_linear_data(skb, hdr,
						sizeof(struct qeth_hdr));
				len = skb->len;
				card->osn_info.data_cb(skb);
				break;
			}
			/* else unknown */
		default:
			dev_kfree_skb_any(skb);
			QETH_DBF_TEXT(TRACE, 3, "inbunkno");
			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
			continue;
		}
		card->dev->last_rx = jiffies;
		card->stats.rx_packets++;
		card->stats.rx_bytes += len;
	}
}
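/* Issue a SETVMAC/DELVMAC/SETGMAC/DELGMAC IPA command for the given MAC
 * address and let the supplied callback evaluate the reply. */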
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
			   enum qeth_ipa_cmds ipacmd,
			   int (*reply_cb) (struct qeth_card *,
					    struct qeth_reply*,
					    unsigned long))
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(TRACE, 2, "L2sdmac");
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
}

static int qeth_l2_send_setmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "L2Smaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code);
		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
		switch (cmd->hdr.return_code) {
		case IPA_RC_L2_DUP_MAC:
		case IPA_RC_L2_DUP_LAYER3_MAC:
			dev_warn(&card->gdev->dev,
				"MAC address "
				"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
				"already exists\n",
				card->dev->dev_addr[0], card->dev->dev_addr[1],
				card->dev->dev_addr[2], card->dev->dev_addr[3],
				card->dev->dev_addr[4], card->dev->dev_addr[5]);
			break;
		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
			dev_warn(&card->gdev->dev,
				"MAC address "
				"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
				"is not authorized\n",
				card->dev->dev_addr[0], card->dev->dev_addr[1],
				card->dev->dev_addr[2], card->dev->dev_addr[3],
				card->dev->dev_addr[4], card->dev->dev_addr[5]);
			break;
		default:
			break;
		}
		cmd->hdr.return_code = -EIO;
	} else {
		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
		       OSA_ADDR_LEN);
		dev_info(&card->gdev->dev,
			"MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
			"successfully registered on device %s\n",
			card->dev->dev_addr[0], card->dev->dev_addr[1],
			card->dev->dev_addr[2], card->dev->dev_addr[3],
			card->dev->dev_addr[4], card->dev->dev_addr[5],
			card->dev->name);
	}
	return 0;
}

static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Setmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					  qeth_l2_send_setmac_cb);
}

static int qeth_l2_send_delmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
		cmd->hdr.return_code = -EIO;
		return 0;
	}
	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;

	return 0;
}

static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Delmac");
	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
		return 0;
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
					  qeth_l2_send_delmac_cb);
}
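/* Obtain the initial MAC address: read it from the adapter for IQD and
 * guest-LAN devices, otherwise generate a random, locally administered
 * address with the 0x02:00:00 vendor prefix. */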
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
	int rc = 0;
	char vendor_pre[] = {0x02, 0x00, 0x00};

	QETH_DBF_TEXT(SETUP, 2, "doL2init");
	QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));

	rc = qeth_query_setadapterparms(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
			"device %s: x%x\n", CARD_BUS_ID(card), rc);
	}

	if ((card->info.type == QETH_CARD_TYPE_IQD) ||
	    (card->info.guestlan)) {
		rc = qeth_setadpparms_change_macaddr(card);
		if (rc) {
			QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
				"device %s: x%x\n", CARD_BUS_ID(card), rc);
			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
			return rc;
		}
		QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
	} else {
		random_ether_addr(card->dev->dev_addr);
		memcpy(card->dev->dev_addr, vendor_pre, 3);
	}
	return 0;
}

static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 3, "setmac");

	if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
		QETH_DBF_TEXT(TRACE, 3, "setmcINV");
		return -EOPNOTSUPP;
	}

	if (card->info.type == QETH_CARD_TYPE_OSN) {
		QETH_DBF_TEXT(TRACE, 3, "setmcOSN");
		return -EOPNOTSUPP;
	}
	QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
	QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN);
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_DBF_TEXT(TRACE, 3, "setmcREC");
		return -ERESTARTSYS;
	}
	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
	if (!rc)
		rc = qeth_l2_send_setmac(card, addr->sa_data);
	return rc;
}
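/* Resynchronize the adapter with the net_device's multicast and secondary
 * unicast address lists and, if supported, update promiscuous mode. */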
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct dev_addr_list *dm;
	struct netdev_hw_addr *ha;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return ;

	QETH_DBF_TEXT(TRACE, 3, "setmulti");
	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
	    (card->state != CARD_STATE_UP))
		return;
	qeth_l2_del_all_mc(card);
	spin_lock_bh(&card->mclock);
	for (dm = dev->mc_list; dm; dm = dm->next)
		qeth_l2_add_mc(card, dm->da_addr, 0);

	list_for_each_entry(ha, &dev->uc.list, list)
		qeth_l2_add_mc(card, ha->addr, 1);

	spin_unlock_bh(&card->mclock);
	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
		return;
	qeth_setadp_promisc_mode(card);
}
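/* Transmit path: prepend the qeth layer-2 header (in a separate header
 * cache object for IQD devices, otherwise in reallocated skb headroom)
 * and pass the frame to the selected outbound QDIO queue. */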
static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct qeth_hdr *hdr = NULL;
	int elements = 0;
	struct qeth_card *card = dev->ml_priv;
	struct sk_buff *new_skb = skb;
	int ipv = qeth_get_ip_version(skb);
	int cast_type = qeth_get_cast_type(card, skb);
	struct qeth_qdio_out_q *queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
	int tx_bytes = skb->len;
	int data_offset = -1;
	int elements_needed = 0;
	int hd_len = 0;

	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
		card->stats.tx_carrier_errors++;
		goto tx_drop;
	}

	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
	    (skb->protocol == htons(ETH_P_IPV6)))
		goto tx_drop;

	if (card->options.performance_stats) {
		card->perf_stats.outbound_cnt++;
		card->perf_stats.outbound_start_time = qeth_get_micros();
	}
	netif_stop_queue(dev);

	if (card->info.type == QETH_CARD_TYPE_OSN)
		hdr = (struct qeth_hdr *)skb->data;
	else {
		if (card->info.type == QETH_CARD_TYPE_IQD) {
			new_skb = skb;
			data_offset = ETH_HLEN;
			hd_len = ETH_HLEN;
			hdr = kmem_cache_alloc(qeth_core_header_cache,
						GFP_ATOMIC);
			if (!hdr)
				goto tx_drop;
			elements_needed++;
			skb_reset_mac_header(new_skb);
			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
			hdr->hdr.l2.pkt_length = new_skb->len;
			memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
				skb_mac_header(new_skb), ETH_HLEN);
		} else {
			/* create a clone with writeable headroom */
			new_skb = skb_realloc_headroom(skb,
						sizeof(struct qeth_hdr));
			if (!new_skb)
				goto tx_drop;
			hdr = (struct qeth_hdr *)skb_push(new_skb,
						sizeof(struct qeth_hdr));
			skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
		}
	}

	elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
						elements_needed);
	if (!elements) {
		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);
		goto tx_drop;
	}

	if (card->info.type != QETH_CARD_TYPE_IQD)
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements);
	else
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					elements, data_offset, hd_len);
	if (!rc) {
		card->stats.tx_packets++;
		card->stats.tx_bytes += tx_bytes;
		if (new_skb != skb)
			dev_kfree_skb_any(skb);
		rc = NETDEV_TX_OK;
	} else {
		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);

		if (rc == -EBUSY) {
			if (new_skb != skb)
				dev_kfree_skb_any(new_skb);
			return NETDEV_TX_BUSY;
		} else
			goto tx_drop;
	}

	netif_wake_queue(dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_time += qeth_get_micros() -
			card->perf_stats.outbound_start_time;
	return rc;

tx_drop:
	card->stats.tx_dropped++;
	card->stats.tx_errors++;
	if ((new_skb != skb) && new_skb)
		dev_kfree_skb_any(new_skb);
	dev_kfree_skb_any(skb);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
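/* QDIO inbound handler: process the indicated buffers and return them to
 * the hardware; schedule recovery on an activate-check condition. */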
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
			unsigned int qdio_err, unsigned int queue,
			int first_element, int count, unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;
	int index;
	int i;

	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}
	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		QETH_DBF_TEXT(TRACE, 1, "qdinchk");
		QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
		QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
				count);
		QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
		qeth_schedule_recovery(card);
		return;
	}
	for (i = first_element; i < (first_element + count); ++i) {
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		if (!(qdio_err &&
		      qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
			qeth_l2_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
}

static int qeth_l2_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_DBF_TEXT(TRACE, 4, "qethopen");
	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
	     (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
		QETH_DBF_TEXT(TRACE, 4, "nomacadr");
		return -EPERM;
	}
	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;
	netif_start_queue(dev);

	if (!card->lan_online && netif_carrier_ok(dev))
		netif_carrier_off(dev);
	return 0;
}

static int qeth_l2_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_DBF_TEXT(TRACE, 4, "qethstop");
	netif_tx_disable(dev);
	if (card->state == CARD_STATE_UP)
		card->state = CARD_STATE_SOFTSETUP;
	return 0;
}

static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	INIT_LIST_HEAD(&card->vid_list);
	INIT_LIST_HEAD(&card->mc_list);
	card->options.layer2 = 1;
	card->discipline.input_handler = (qdio_handler_t *)
		qeth_l2_qdio_input_handler;
	card->discipline.output_handler = (qdio_handler_t *)
		qeth_qdio_output_handler;
	card->discipline.recover = qeth_l2_recover;
	return 0;
}

static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE) {
		card->use_hard_stop = 1;
		qeth_l2_set_offline(cgdev);
	}

	if (card->dev) {
		unregister_netdev(card->dev);
		card->dev = NULL;
	}

	qeth_l2_del_all_mc(card);
	return;
}

static struct ethtool_ops qeth_l2_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
	.get_settings = qeth_core_ethtool_get_settings,
};

static struct ethtool_ops qeth_l2_osn_ops = {
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};

static const struct net_device_ops qeth_l2_netdev_ops = {
	.ndo_open		= qeth_l2_open,
	.ndo_stop		= qeth_l2_stop,
	.ndo_get_stats		= qeth_get_stats,
	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= qeth_l2_set_multicast_list,
	.ndo_do_ioctl		= qeth_l2_do_ioctl,
	.ndo_set_mac_address	= qeth_l2_set_mac_address,
	.ndo_change_mtu		= qeth_change_mtu,
	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qeth_l2_vlan_rx_kill_vid,
	.ndo_tx_timeout		= qeth_tx_timeout,
};
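/* Allocate and register the net_device matching the card type ("hsi%d"
 * for IQD, "osn%d" for OSN, a plain ethernet device otherwise) and wire
 * up the netdev and ethtool operations. */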
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSAE:
		card->dev = alloc_etherdev(0);
		break;
	case QETH_CARD_TYPE_IQD:
		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
		break;
	case QETH_CARD_TYPE_OSN:
		card->dev = alloc_netdev(0, "osn%d", ether_setup);
		card->dev->flags |= IFF_NOARP;
		break;
	default:
		card->dev = alloc_etherdev(0);
	}

	if (!card->dev)
		return -ENODEV;

	card->dev->ml_priv = card;
	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
	card->dev->mtu = card->info.initial_mtu;
	card->dev->netdev_ops = &qeth_l2_netdev_ops;
	if (card->info.type != QETH_CARD_TYPE_OSN)
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
	else
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
	card->dev->features |= NETIF_F_HW_VLAN_FILTER;
	card->info.broadcast_capable = 1;
	qeth_l2_request_initial_mac(card);
	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	return register_netdev(card->dev);
}
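/* Bring the ccwgroup device online: set the read/write/data subchannels
 * online, run the hard and soft setup steps, start the LAN and reopen
 * the interface when called from recovery. */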
static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_card_states recover_flag;

	BUG_ON(!card);
	QETH_DBF_TEXT(SETUP, 2, "setonlin");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
	recover_flag = card->state;
	rc = ccw_device_set_online(CARD_RDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}
	rc = ccw_device_set_online(CARD_WDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}
	rc = ccw_device_set_online(CARD_DDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}

	rc = qeth_core_hardsetup_card(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		goto out_remove;
	}

	if (!card->dev && qeth_l2_setup_netdev(card))
		goto out_remove;

	if (card->info.type != QETH_CARD_TYPE_OSN)
		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);

	card->state = CARD_STATE_HARDSETUP;
	qeth_print_status_message(card);

	/* softsetup */
	QETH_DBF_TEXT(SETUP, 2, "softsetp");

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		if (rc == 0xe080) {
			dev_warn(&card->gdev->dev,
				"The LAN is offline\n");
			card->lan_online = 0;
			return 0;
		}
		goto out_remove;
	} else
		card->lan_online = 1;

	if (card->info.type != QETH_CARD_TYPE_OSN) {
		qeth_set_large_send(card, card->options.large_send);
		qeth_l2_process_vlans(card, 0);
	}

	netif_tx_disable(card->dev);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;
	netif_carrier_on(card->dev);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (recover_flag == CARD_STATE_RECOVER) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_l2_open(card->dev);
		} else {
			rtnl_lock();
			dev_open(card->dev);
			rtnl_unlock();
		}
		/* this also sets saved unicast addresses */
		qeth_l2_set_multicast_list(card->dev);
	}
	/* let user_space know that device is online */
	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
	return 0;

out_remove:
	card->use_hard_stop = 1;
	qeth_l2_stop_card(card, 0);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	if (recover_flag == CARD_STATE_RECOVER)
		card->state = CARD_STATE_RECOVER;
	else
		card->state = CARD_STATE_DOWN;
	return -ENODEV;
}

static int qeth_l2_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_l2_set_online(gdev, 0);
}

static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
					int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;
	enum qeth_card_states recover_flag;

	QETH_DBF_TEXT(SETUP, 3, "setoffl");
	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
		netif_carrier_off(card->dev);
	recover_flag = card->state;
	qeth_l2_stop_card(card, recovery_mode);
	rc = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	/* let user_space know that device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	return 0;
}

static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l2_set_offline(cgdev, 0);
}
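/* Recovery thread: take the card offline with a hard stop and bring it
 * back online, reporting the result to the kernel log. */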
static int qeth_l2_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_DBF_TEXT(TRACE, 2, "recover1");
	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_DBF_TEXT(TRACE, 2, "recover2");
	dev_warn(&card->gdev->dev,
		"A recovery process has been started for the device\n");
	card->use_hard_stop = 1;
	__qeth_l2_set_offline(card->gdev, 1);
	rc = __qeth_l2_set_online(card->gdev, 1);
	/* don't run another scheduled recovery */
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		dev_info(&card->gdev->dev,
			"Device successfully recovered!\n");
	else {
		if (card->dev) {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
	}
	return 0;
}

static int __init qeth_l2_init(void)
{
	pr_info("register layer 2 discipline\n");
	return 0;
}

static void __exit qeth_l2_exit(void)
{
	pr_info("unregister layer 2 discipline\n");
}

static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
}

static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	if (card->dev)
		netif_device_detach(card->dev);
	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;
	if (card->state == CARD_STATE_UP) {
		card->use_hard_stop = 1;
		__qeth_l2_set_offline(card->gdev, 1);
	} else
		__qeth_l2_set_offline(card->gdev, 0);
	return 0;
}

static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;

	if (gdev->state == CCWGROUP_OFFLINE)
		goto out;

	if (card->state == CARD_STATE_RECOVER) {
		rc = __qeth_l2_set_online(card->gdev, 1);
		if (rc) {
			if (card->dev) {
				rtnl_lock();
				dev_close(card->dev);
				rtnl_unlock();
			}
		}
	} else
		rc = __qeth_l2_set_online(card->gdev, 0);
out:
	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (card->dev)
		netif_device_attach(card->dev);
	if (rc)
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
	return rc;
}

struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
	.probe = qeth_l2_probe_device,
	.remove = qeth_l2_remove_device,
	.set_online = qeth_l2_set_online,
	.set_offline = qeth_l2_set_offline,
	.shutdown = qeth_l2_shutdown,
	.freeze = qeth_l2_pm_suspend,
	.thaw = qeth_l2_pm_resume,
	.restore = qeth_l2_pm_resume,
};
EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
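/* OSN support: helpers exported to the OSN device driver for sending
 * control data and IPA commands over the write channel and for
 * registering/deregistering its assist and data callbacks. */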
static int qeth_osn_send_control_data(struct qeth_card *card, int len,
			   struct qeth_cmd_buffer *iob)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 5, "osndctrd");

	wait_event(card->wait_q,
		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
	qeth_prepare_control_data(card, len, iob);
	QETH_DBF_TEXT(TRACE, 6, "osnoirqp");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
			   "ccw_device_start rc = %i\n", rc);
		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
	}
	return rc;
}

static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
			struct qeth_cmd_buffer *iob, int data_len)
{
	u16 s1, s2;

	QETH_DBF_TEXT(TRACE, 4, "osndipa");

	qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
	s2 = (u16)data_len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_osn_send_control_data(card, s1, iob);
}

int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
	if (!dev)
		return -ENODEV;
	card = dev->ml_priv;
	if (!card)
		return -ENODEV;
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;
	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
	return rc;
}
EXPORT_SYMBOL(qeth_osn_assist);

int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
		  int (*assist_cb)(struct net_device *, void *),
		  int (*data_cb)(struct sk_buff *))
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 2, "osnreg");
	*dev = qeth_l2_netdev_by_devno(read_dev_no);
	if (*dev == NULL)
		return -ENODEV;
	card = (*dev)->ml_priv;
	if (!card)
		return -ENODEV;
	if ((assist_cb == NULL) || (data_cb == NULL))
		return -EINVAL;
	card->osn_info.assist_cb = assist_cb;
	card->osn_info.data_cb = data_cb;
	return 0;
}
EXPORT_SYMBOL(qeth_osn_register);

void qeth_osn_deregister(struct net_device *dev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 2, "osndereg");
	if (!dev)
		return;
	card = dev->ml_priv;
	if (!card)
		return;
	card->osn_info.assist_cb = NULL;
	card->osn_info.data_cb = NULL;
	return;
}
EXPORT_SYMBOL(qeth_osn_deregister);

module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 2 discipline");
MODULE_LICENSE("GPL");