qeth_l2_main.c

/*
 *  drivers/s390/net/qeth_l2_main.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ip.h>

#include <asm/s390_rdev.h>

#include "qeth_core.h"
#include "qeth_core_offl.h"

static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
			enum qeth_ipa_cmds,
			int (*reply_cb) (struct qeth_card *,
					 struct qeth_reply *,
					 unsigned long));
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);
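
/*
 * Private ioctl handler: services the qeth-specific SNMP and card-type
 * requests and emulates a minimal MII interface (SIOCGMIIPHY/SIOCGMIIREG)
 * for layer-2 devices; OSN devices reject all ioctls with -EPERM.
 */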
static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return -EPERM;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
		    !card->info.guestlan)
			return 1;
		return 0;
		break;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
	return rc;
}
static int qeth_l2_verify_dev(struct net_device *dev)
{
	struct qeth_card *card;
	unsigned long flags;
	int rc = 0;

	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		if (card->dev == dev) {
			rc = QETH_REAL_CARD;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);

	return rc;
}
static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
{
	struct qeth_card *card;
	struct net_device *ndev;
	__u16 temp_dev_no;
	unsigned long flags;
	struct ccw_dev_id read_devid;

	ndev = NULL;
	memcpy(&temp_dev_no, read_dev_no, 2);
	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		ccw_device_get_id(CARD_RDEV(card), &read_devid);
		if (read_devid.devno == temp_dev_no) {
			ndev = card->dev;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
	return ndev;
}
static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	/* MAC already registered, needed in couple/uncouple case */
	if (cmd->hdr.return_code == 0x2005) {
		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
			  mac, QETH_CARD_IFNAME(card));
		cmd->hdr.return_code = 0;
	}
	if (cmd->hdr.return_code)
		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}

static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Sgmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
					  qeth_l2_send_setgroupmac_cb);
}

static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	if (cmd->hdr.return_code)
		QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}

static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Dgmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
					  qeth_l2_send_delgroupmac_cb);
}
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
{
	struct qeth_mc_mac *mc;
	int rc;

	mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
	if (!mc)
		return;

	memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
	mc->mc_addrlen = OSA_ADDR_LEN;
	mc->is_vmac = vmac;

	if (vmac) {
		rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					NULL);
	} else {
		rc = qeth_l2_send_setgroupmac(card, mac);
	}

	if (!rc)
		list_add_tail(&mc->list, &card->mc_list);
	else
		kfree(mc);
}

static void qeth_l2_del_all_mc(struct qeth_card *card)
{
	struct qeth_mc_mac *mc, *tmp;

	spin_lock_bh(&card->mclock);
	list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
		if (mc->is_vmac)
			qeth_l2_send_setdelmac(card, mc->mc_addr,
					IPA_CMD_DELVMAC, NULL);
		else
			qeth_l2_send_delgroupmac(card, mc->mc_addr);
		list_del(&mc->list);
		kfree(mc);
	}
	spin_unlock_bh(&card->mclock);
}
static void qeth_l2_get_packet_type(struct qeth_card *card,
			struct qeth_hdr *hdr, struct sk_buff *skb)
{
	__u16 hdr_mac;

	if (!memcmp(skb->data + QETH_HEADER_SIZE,
		    skb->dev->broadcast, 6)) {
		/* broadcast? */
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
		return;
	}
	hdr_mac = *((__u16 *)skb->data);
	/* tr multicast? */
	switch (card->info.link_type) {
	case QETH_LINK_TYPE_HSTR:
	case QETH_LINK_TYPE_LANE_TR:
		if ((hdr_mac == QETH_TR_MAC_NC) ||
		    (hdr_mac == QETH_TR_MAC_C))
			hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
		else
			hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
		break;
	/* eth or so multicast? */
	default:
		if ((hdr_mac == QETH_ETH_MAC_V4) ||
		    (hdr_mac == QETH_ETH_MAC_V6))
			hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
		else
			hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
	}
}

static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
			struct sk_buff *skb, int ipv, int cast_type)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);

	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;

	/* set byte 3 to casting flags */
	if (cast_type == RTN_MULTICAST)
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
	else if (cast_type == RTN_BROADCAST)
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
	else
		qeth_l2_get_packet_type(card, hdr, skb);

	hdr->hdr.l2.pkt_length = skb->len - QETH_HEADER_SIZE;
	/* VSWITCH relies on the VLAN
	 * information to be present in
	 * the QDIO header */
	if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
		hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
	}
}
static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "L2sdvcb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
			  "Continuing\n", cmd->data.setdelvlan.vlan_id,
			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
		QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command);
		QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card));
		QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
	}
	return 0;
}

static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
				enum qeth_ipa_cmds ipacmd)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd);
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setdelvlan.vlan_id = i;
	return qeth_send_ipa_cmd(card, iob,
				 qeth_l2_send_setdelvlan_cb, NULL);
}

static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
{
	struct qeth_vlan_vid *id;

	QETH_DBF_TEXT(TRACE, 3, "L2prcvln");
	spin_lock_bh(&card->vlanlock);
	list_for_each_entry(id, &card->vid_list, list) {
		if (clear)
			qeth_l2_send_setdelvlan(card, id->vid,
				IPA_CMD_DELVLAN);
		else
			qeth_l2_send_setdelvlan(card, id->vid,
				IPA_CMD_SETVLAN);
	}
	spin_unlock_bh(&card->vlanlock);
}

static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_vlan_vid *id;

	QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
	if (id) {
		id->vid = vid;
		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
		spin_lock_bh(&card->vlanlock);
		list_add_tail(&id->list, &card->vid_list);
		spin_unlock_bh(&card->vlanlock);
	}
}

static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_vlan_vid *id, *tmpid = NULL;
	struct qeth_card *card = dev->ml_priv;

	QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
	spin_lock_bh(&card->vlanlock);
	list_for_each_entry(id, &card->vid_list, list) {
		if (id->vid == vid) {
			list_del(&id->list);
			tmpid = id;
			break;
		}
	}
	spin_unlock_bh(&card->vlanlock);
	if (tmpid) {
		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
		kfree(tmpid);
	}
	qeth_l2_set_multicast_list(card->dev);
}
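
/*
 * Tear the card down step by step: close or stop the interface, deregister
 * the MAC unless a hard stop was requested, drop VLAN and multicast
 * registrations, clear the QDIO queues and finally the command buffers,
 * walking the card state from UP down to DOWN.
 */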
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(SETUP, 2, "stopcard");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, 0, 1);
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_l2_stop(card->dev);
		} else {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
		if (!card->use_hard_stop) {
			__u8 *mac = &card->dev->dev_addr[0];
			rc = qeth_l2_send_delmac(card, mac);
			QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
		qeth_l2_process_vlans(card, 1);
		if (!card->use_hard_stop)
			qeth_l2_del_all_mc(card);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}
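
/*
 * Receive path: pull completed skbs out of an inbound QDIO buffer, hand
 * layer-2 frames to the stack via netif_rx() and OSN frames to the
 * registered data callback, and drop everything else.
 */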
static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
			    struct qeth_qdio_buffer *buf, int index)
{
	struct qdio_buffer_element *element;
	struct sk_buff *skb;
	struct qeth_hdr *hdr;
	int offset;
	unsigned int len;

	/* get first element of current buffer */
	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
	offset = 0;
	if (card->options.performance_stats)
		card->perf_stats.bufs_rec++;
	while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
				       &offset, &hdr))) {
		skb->dev = card->dev;
		/* is device UP ? */
		if (!(card->dev->flags & IFF_UP)) {
			dev_kfree_skb_any(skb);
			continue;
		}

		switch (hdr->hdr.l2.id) {
		case QETH_HEADER_TYPE_LAYER2:
			skb->pkt_type = PACKET_HOST;
			skb->protocol = eth_type_trans(skb, skb->dev);
			if (card->options.checksum_type == NO_CHECKSUMMING)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
			if (skb->protocol == htons(ETH_P_802_2))
				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
			len = skb->len;
			netif_rx(skb);
			break;
		case QETH_HEADER_TYPE_OSN:
			if (card->info.type == QETH_CARD_TYPE_OSN) {
				skb_push(skb, sizeof(struct qeth_hdr));
				skb_copy_to_linear_data(skb, hdr,
						sizeof(struct qeth_hdr));
				len = skb->len;
				card->osn_info.data_cb(skb);
				break;
			}
			/* else unknown */
		default:
			dev_kfree_skb_any(skb);
			QETH_DBF_TEXT(TRACE, 3, "inbunkno");
			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
			continue;
		}
		card->dev->last_rx = jiffies;
		card->stats.rx_packets++;
		card->stats.rx_bytes += len;
	}
}
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
			enum qeth_ipa_cmds ipacmd,
			int (*reply_cb) (struct qeth_card *,
					 struct qeth_reply *,
					 unsigned long))
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(TRACE, 2, "L2sdmac");
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
}

static int qeth_l2_send_setmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "L2Smaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code);
		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
		cmd->hdr.return_code = -EIO;
	} else {
		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
		       OSA_ADDR_LEN);
		dev_info(&card->gdev->dev,
			"MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
			"successfully registered on device %s\n",
			card->dev->dev_addr[0], card->dev->dev_addr[1],
			card->dev->dev_addr[2], card->dev->dev_addr[3],
			card->dev->dev_addr[4], card->dev->dev_addr[5],
			card->dev->name);
	}
	return 0;
}

static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Setmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					  qeth_l2_send_setmac_cb);
}

static int qeth_l2_send_delmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
		cmd->hdr.return_code = -EIO;
		return 0;
	}
	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;

	return 0;
}

static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Delmac");
	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
		return 0;
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
					  qeth_l2_send_delmac_cb);
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
	int rc = 0;
	char vendor_pre[] = {0x02, 0x00, 0x00};

	QETH_DBF_TEXT(SETUP, 2, "doL2init");
	QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));

	rc = qeth_query_setadapterparms(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
			"device %s: x%x\n", CARD_BUS_ID(card), rc);
	}

	if ((card->info.type == QETH_CARD_TYPE_IQD) ||
	    (card->info.guestlan)) {
		rc = qeth_setadpparms_change_macaddr(card);
		if (rc) {
			QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
				"device %s: x%x\n", CARD_BUS_ID(card), rc);
			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
			return rc;
		}
		QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
	} else {
		random_ether_addr(card->dev->dev_addr);
		memcpy(card->dev->dev_addr, vendor_pre, 3);
	}
	return 0;
}
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 3, "setmac");

	if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
		QETH_DBF_TEXT(TRACE, 3, "setmcINV");
		return -EOPNOTSUPP;
	}

	if (card->info.type == QETH_CARD_TYPE_OSN) {
		QETH_DBF_TEXT(TRACE, 3, "setmcOSN");
		return -EOPNOTSUPP;
	}
	QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
	QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN);
	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
	if (!rc)
		rc = qeth_l2_send_setmac(card, addr->sa_data);
	return rc;
}
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct dev_addr_list *dm;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return;
	QETH_DBF_TEXT(TRACE, 3, "setmulti");
	qeth_l2_del_all_mc(card);
	spin_lock_bh(&card->mclock);
	for (dm = dev->mc_list; dm; dm = dm->next)
		qeth_l2_add_mc(card, dm->da_addr, 0);

	for (dm = dev->uc_list; dm; dm = dm->next)
		qeth_l2_add_mc(card, dm->da_addr, 1);

	spin_unlock_bh(&card->mclock);
	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
		return;
	qeth_setadp_promisc_mode(card);
}
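
/*
 * Transmit path: build the qeth_hdr for the outgoing frame (in a separate
 * header-cache element for IQD cards, otherwise in freshly reallocated
 * headroom), optionally set up an EDDP context for GSO skbs, and pass the
 * buffer to the QDIO output queue chosen by qeth_get_priority_queue().
 */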
static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct qeth_hdr *hdr = NULL;
	int elements = 0;
	struct qeth_card *card = dev->ml_priv;
	struct sk_buff *new_skb = skb;
	int ipv = qeth_get_ip_version(skb);
	int cast_type = qeth_get_cast_type(card, skb);
	struct qeth_qdio_out_q *queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
	int tx_bytes = skb->len;
	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
	struct qeth_eddp_context *ctx = NULL;
	int data_offset = -1;
	int elements_needed = 0;
	int hd_len = 0;

	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
		card->stats.tx_carrier_errors++;
		goto tx_drop;
	}

	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
	    (skb->protocol == htons(ETH_P_IPV6)))
		goto tx_drop;

	if (card->options.performance_stats) {
		card->perf_stats.outbound_cnt++;
		card->perf_stats.outbound_start_time = qeth_get_micros();
	}
	netif_stop_queue(dev);

	if (skb_is_gso(skb))
		large_send = QETH_LARGE_SEND_EDDP;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		hdr = (struct qeth_hdr *)skb->data;
	else {
		if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
		    (skb_shinfo(skb)->nr_frags == 0)) {
			new_skb = skb;
			data_offset = ETH_HLEN;
			hd_len = ETH_HLEN;
			hdr = kmem_cache_alloc(qeth_core_header_cache,
						GFP_ATOMIC);
			if (!hdr)
				goto tx_drop;
			elements_needed++;
			skb_reset_mac_header(new_skb);
			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
			hdr->hdr.l2.pkt_length = new_skb->len;
			memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
				skb_mac_header(new_skb), ETH_HLEN);
		} else {
			/* create a clone with writeable headroom */
			new_skb = skb_realloc_headroom(skb,
						sizeof(struct qeth_hdr));
			if (!new_skb)
				goto tx_drop;
			hdr = (struct qeth_hdr *)skb_push(new_skb,
						sizeof(struct qeth_hdr));
			skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
		}
	}

	if (large_send == QETH_LARGE_SEND_EDDP) {
		ctx = qeth_eddp_create_context(card, new_skb, hdr,
					       skb->sk->sk_protocol);
		if (ctx == NULL) {
			QETH_DBF_MESSAGE(2, "could not create eddp context\n");
			goto tx_drop;
		}
	} else {
		elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
						elements_needed);
		if (!elements) {
			if (data_offset >= 0)
				kmem_cache_free(qeth_core_header_cache, hdr);
			goto tx_drop;
		}
	}

	if ((large_send == QETH_LARGE_SEND_NO) &&
	    (skb->ip_summed == CHECKSUM_PARTIAL))
		qeth_tx_csum(new_skb);

	if (card->info.type != QETH_CARD_TYPE_IQD)
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements, ctx);
	else
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					elements, ctx, data_offset, hd_len);
	if (!rc) {
		card->stats.tx_packets++;
		card->stats.tx_bytes += tx_bytes;
		if (new_skb != skb)
			dev_kfree_skb_any(skb);
		if (card->options.performance_stats) {
			if (large_send != QETH_LARGE_SEND_NO) {
				card->perf_stats.large_send_bytes += tx_bytes;
				card->perf_stats.large_send_cnt++;
			}
			if (skb_shinfo(new_skb)->nr_frags > 0) {
				card->perf_stats.sg_skbs_sent++;
				/* nr_frags + skb->data */
				card->perf_stats.sg_frags_sent +=
					skb_shinfo(new_skb)->nr_frags + 1;
			}
		}
		if (ctx != NULL) {
			qeth_eddp_put_context(ctx);
			dev_kfree_skb_any(new_skb);
		}
	} else {
		if (ctx != NULL)
			qeth_eddp_put_context(ctx);

		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);

		if (rc == -EBUSY) {
			if (new_skb != skb)
				dev_kfree_skb_any(new_skb);
			return NETDEV_TX_BUSY;
		} else
			goto tx_drop;
	}

	netif_wake_queue(dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_time += qeth_get_micros() -
			card->perf_stats.outbound_start_time;
	return rc;

tx_drop:
	card->stats.tx_dropped++;
	card->stats.tx_errors++;
	if ((new_skb != skb) && new_skb)
		dev_kfree_skb_any(new_skb);
	dev_kfree_skb_any(skb);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
			unsigned int qdio_err, unsigned int queue,
			int first_element, int count, unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;
	int index;
	int i;

	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}
	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		QETH_DBF_TEXT(TRACE, 1, "qdinchk");
		QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
		QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
				count);
		QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
		qeth_schedule_recovery(card);
		return;
	}
	for (i = first_element; i < (first_element + count); ++i) {
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		if (!(qdio_err &&
		      qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
			qeth_l2_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
}
static int qeth_l2_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_DBF_TEXT(TRACE, 4, "qethopen");
	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
	    (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
		QETH_DBF_TEXT(TRACE, 4, "nomacadr");
		return -EPERM;
	}
	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;
	netif_start_queue(dev);

	if (!card->lan_online && netif_carrier_ok(dev))
		netif_carrier_off(dev);
	return 0;
}

static int qeth_l2_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_DBF_TEXT(TRACE, 4, "qethstop");
	netif_tx_disable(dev);
	if (card->state == CARD_STATE_UP)
		card->state = CARD_STATE_SOFTSETUP;
	return 0;
}
static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	INIT_LIST_HEAD(&card->vid_list);
	INIT_LIST_HEAD(&card->mc_list);
	card->options.layer2 = 1;
	card->discipline.input_handler = (qdio_handler_t *)
		qeth_l2_qdio_input_handler;
	card->discipline.output_handler = (qdio_handler_t *)
		qeth_qdio_output_handler;
	card->discipline.recover = qeth_l2_recover;
	return 0;
}

static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE) {
		card->use_hard_stop = 1;
		qeth_l2_set_offline(cgdev);
	}

	if (card->dev) {
		unregister_netdev(card->dev);
		card->dev = NULL;
	}

	qeth_l2_del_all_mc(card);
	return;
}
static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data)
{
	struct qeth_card *card = dev->ml_priv;

	if (data) {
		if (card->options.large_send == QETH_LARGE_SEND_NO) {
			card->options.large_send = QETH_LARGE_SEND_EDDP;
			dev->features |= NETIF_F_TSO;
		}
	} else {
		dev->features &= ~NETIF_F_TSO;
		card->options.large_send = QETH_LARGE_SEND_NO;
	}
	return 0;
}

static struct ethtool_ops qeth_l2_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = qeth_l2_ethtool_set_tso,
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
	.get_settings = qeth_core_ethtool_get_settings,
};

static struct ethtool_ops qeth_l2_osn_ops = {
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSAE:
		card->dev = alloc_etherdev(0);
		break;
	case QETH_CARD_TYPE_IQD:
		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
		break;
	case QETH_CARD_TYPE_OSN:
		card->dev = alloc_netdev(0, "osn%d", ether_setup);
		card->dev->flags |= IFF_NOARP;
		break;
	default:
		card->dev = alloc_etherdev(0);
	}

	if (!card->dev)
		return -ENODEV;

	card->dev->ml_priv = card;
	card->dev->tx_timeout = &qeth_tx_timeout;
	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
	card->dev->open = qeth_l2_open;
	card->dev->stop = qeth_l2_stop;
	card->dev->hard_start_xmit = qeth_l2_hard_start_xmit;
	card->dev->do_ioctl = qeth_l2_do_ioctl;
	card->dev->get_stats = qeth_get_stats;
	card->dev->change_mtu = qeth_change_mtu;
	card->dev->set_multicast_list = qeth_l2_set_multicast_list;
	card->dev->vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid;
	card->dev->vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid;
	card->dev->set_mac_address = qeth_l2_set_mac_address;
	card->dev->mtu = card->info.initial_mtu;
	if (card->info.type != QETH_CARD_TYPE_OSN)
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
	else
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
	card->dev->features |= NETIF_F_HW_VLAN_FILTER;
	card->info.broadcast_capable = 1;
	qeth_l2_request_initial_mac(card);
	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	return register_netdev(card->dev);
}
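
/*
 * Bring the card online: set the read/write/data subchannels online, run
 * the core hard setup, register the net_device and MAC on first use, issue
 * STARTLAN, restore VLANs, and initialize the QDIO queues. On failure the
 * card is stopped again and the subchannels are set offline.
 */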
static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_card_states recover_flag;

	BUG_ON(!card);
	QETH_DBF_TEXT(SETUP, 2, "setonlin");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
	recover_flag = card->state;
	rc = ccw_device_set_online(CARD_RDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}
	rc = ccw_device_set_online(CARD_WDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}
	rc = ccw_device_set_online(CARD_DDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}

	rc = qeth_core_hardsetup_card(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		goto out_remove;
	}

	if (!card->dev && qeth_l2_setup_netdev(card))
		goto out_remove;

	if (card->info.type != QETH_CARD_TYPE_OSN)
		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);

	card->state = CARD_STATE_HARDSETUP;
	qeth_print_status_message(card);

	/* softsetup */
	QETH_DBF_TEXT(SETUP, 2, "softsetp");

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		if (rc == 0xe080) {
			dev_warn(&card->gdev->dev,
				"The LAN is offline\n");
			card->lan_online = 0;
		}
		return rc;
	} else
		card->lan_online = 1;

	if (card->info.type != QETH_CARD_TYPE_OSN) {
		qeth_set_large_send(card, card->options.large_send);
		qeth_l2_process_vlans(card, 0);
	}

	netif_tx_disable(card->dev);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;
	netif_carrier_on(card->dev);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (recover_flag == CARD_STATE_RECOVER) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_l2_open(card->dev);
		} else {
			rtnl_lock();
			dev_open(card->dev);
			rtnl_unlock();
		}
		/* this also sets saved unicast addresses */
		qeth_l2_set_multicast_list(card->dev);
	}
	/* let user_space know that device is online */
	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
	return 0;
out_remove:
	card->use_hard_stop = 1;
	qeth_l2_stop_card(card, 0);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	if (recover_flag == CARD_STATE_RECOVER)
		card->state = CARD_STATE_RECOVER;
	else
		card->state = CARD_STATE_DOWN;
	return -ENODEV;
}
static int qeth_l2_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_l2_set_online(gdev, 0);
}

static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
				 int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;
	enum qeth_card_states recover_flag;

	QETH_DBF_TEXT(SETUP, 3, "setoffl");
	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
		netif_carrier_off(card->dev);
	recover_flag = card->state;
	qeth_l2_stop_card(card, recovery_mode);
	rc  = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	/* let user_space know that device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	return 0;
}

static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l2_set_offline(cgdev, 0);
}
static int qeth_l2_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_DBF_TEXT(TRACE, 2, "recover1");
	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_DBF_TEXT(TRACE, 2, "recover2");
	dev_warn(&card->gdev->dev,
		"A recovery process has been started for the device\n");
	card->use_hard_stop = 1;
	__qeth_l2_set_offline(card->gdev, 1);
	rc = __qeth_l2_set_online(card->gdev, 1);
	/* don't run another scheduled recovery */
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		dev_info(&card->gdev->dev,
			"Device successfully recovered!\n");
	else {
		rtnl_lock();
		dev_close(card->dev);
		rtnl_unlock();
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
	}
	return 0;
}
static int __init qeth_l2_init(void)
{
	pr_info("register layer 2 discipline\n");
	return 0;
}

static void __exit qeth_l2_exit(void)
{
	pr_info("unregister layer 2 discipline\n");
}

static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
}

struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
	.probe = qeth_l2_probe_device,
	.remove = qeth_l2_remove_device,
	.set_online = qeth_l2_set_online,
	.set_offline = qeth_l2_set_offline,
	.shutdown = qeth_l2_shutdown,
};
EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
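
/*
 * OSN support: exported helpers that let the OSN module send IPA commands
 * over the write channel and register callbacks for assist and inbound
 * data handling on a card looked up by its read-device number.
 */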
static int qeth_osn_send_control_data(struct qeth_card *card, int len,
			   struct qeth_cmd_buffer *iob)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 5, "osndctrd");

	wait_event(card->wait_q,
		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
	qeth_prepare_control_data(card, len, iob);
	QETH_DBF_TEXT(TRACE, 6, "osnoirqp");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
			   "ccw_device_start rc = %i\n", rc);
		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
	}
	return rc;
}

static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
			struct qeth_cmd_buffer *iob, int data_len)
{
	u16 s1, s2;

	QETH_DBF_TEXT(TRACE, 4, "osndipa");

	qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
	s2 = (u16)data_len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_osn_send_control_data(card, s1, iob);
}
int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
	if (!dev)
		return -ENODEV;
	card = dev->ml_priv;
	if (!card)
		return -ENODEV;
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;
	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data + IPA_PDU_HEADER_SIZE, data, data_len);
	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
	return rc;
}
EXPORT_SYMBOL(qeth_osn_assist);

int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
		  int (*assist_cb)(struct net_device *, void *),
		  int (*data_cb)(struct sk_buff *))
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 2, "osnreg");
	*dev = qeth_l2_netdev_by_devno(read_dev_no);
	if (*dev == NULL)
		return -ENODEV;
	card = (*dev)->ml_priv;
	if (!card)
		return -ENODEV;
	if ((assist_cb == NULL) || (data_cb == NULL))
		return -EINVAL;
	card->osn_info.assist_cb = assist_cb;
	card->osn_info.data_cb = data_cb;
	return 0;
}
EXPORT_SYMBOL(qeth_osn_register);

void qeth_osn_deregister(struct net_device *dev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 2, "osndereg");
	if (!dev)
		return;
	card = dev->ml_priv;
	if (!card)
		return;
	card->osn_info.assist_cb = NULL;
	card->osn_info.data_cb = NULL;
	return;
}
EXPORT_SYMBOL(qeth_osn_deregister);

module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 2 discipline");
MODULE_LICENSE("GPL");