/*
 *  drivers/s390/net/qeth_l2_main.c
 *
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/list.h>

#include "qeth_core.h"

static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
			   enum qeth_ipa_cmds,
			   int (*reply_cb) (struct qeth_card *,
					    struct qeth_reply*,
					    unsigned long));
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);

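/*
 * Private ioctl handler for layer-2 devices: SNMP control, card-type query
 * and basic MII (PHY) register access.  Not permitted on OSN devices.
 */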
static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return -EPERM;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((card->info.type == QETH_CARD_TYPE_OSD ||
		     card->info.type == QETH_CARD_TYPE_OSM ||
		     card->info.type == QETH_CARD_TYPE_OSX) &&
		    !card->info.guestlan)
			return 1;
		return 0;
		break;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
	return rc;
}

static int qeth_l2_verify_dev(struct net_device *dev)
{
	struct qeth_card *card;
	unsigned long flags;
	int rc = 0;

	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		if (card->dev == dev) {
			rc = QETH_REAL_CARD;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);

	return rc;
}

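/*
 * Look up the net_device belonging to the card whose read subchannel has
 * the given device number; used by the OSN registration path.
 */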
static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
{
	struct qeth_card *card;
	struct net_device *ndev;
	__u16 temp_dev_no;
	unsigned long flags;
	struct ccw_dev_id read_devid;

	ndev = NULL;
	memcpy(&temp_dev_no, read_dev_no, 2);
	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		ccw_device_get_id(CARD_RDEV(card), &read_devid);
		if (read_devid.devno == temp_dev_no) {
			ndev = card->dev;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
	return ndev;
}

static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_CARD_TEXT(card, 2, "L2Sgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	/* MAC already registered, needed in couple/uncouple case */
	if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
			  mac, QETH_CARD_IFNAME(card));
		cmd->hdr.return_code = 0;
	}
	if (cmd->hdr.return_code)
		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}

static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_CARD_TEXT(card, 2, "L2Sgmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
					  qeth_l2_send_setgroupmac_cb);
}

static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_CARD_TEXT(card, 2, "L2Dgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	if (cmd->hdr.return_code)
		QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}

static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_CARD_TEXT(card, 2, "L2Dgmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
					  qeth_l2_send_delgroupmac_cb);
}

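/*
 * Register one multicast (or secondary unicast) address with the card and,
 * on success, remember it in card->mc_list so it can be removed again later.
 * VMAC entries use SETVMAC/DELVMAC, group addresses use SETGMAC/DELGMAC.
 */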
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
{
	struct qeth_mc_mac *mc;
	int rc;

	mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
	if (!mc)
		return;

	memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
	mc->mc_addrlen = OSA_ADDR_LEN;
	mc->is_vmac = vmac;

	if (vmac) {
		rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					NULL);
	} else {
		rc = qeth_l2_send_setgroupmac(card, mac);
	}

	if (!rc)
		list_add_tail(&mc->list, &card->mc_list);
	else
		kfree(mc);
}

static void qeth_l2_del_all_mc(struct qeth_card *card)
{
	struct qeth_mc_mac *mc, *tmp;

	spin_lock_bh(&card->mclock);
	list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
		if (mc->is_vmac)
			qeth_l2_send_setdelmac(card, mc->mc_addr,
					IPA_CMD_DELVMAC, NULL);
		else
			qeth_l2_send_delgroupmac(card, mc->mc_addr);
		list_del(&mc->list);
		kfree(mc);
	}
	spin_unlock_bh(&card->mclock);
}

static inline int qeth_l2_get_cast_type(struct qeth_card *card,
			struct sk_buff *skb)
{
	if (card->info.type == QETH_CARD_TYPE_OSN)
		return RTN_UNSPEC;
	if (is_broadcast_ether_addr(skb->data))
		return RTN_BROADCAST;
	if (is_multicast_ether_addr(skb->data))
		return RTN_MULTICAST;
	return RTN_UNSPEC;
}

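/*
 * Build the layer-2 QDIO transport header for an outgoing frame: header
 * type, casting flags, packet length and, if the frame is VLAN tagged,
 * the VLAN id expected by the virtual switch.
 */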
static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
			struct sk_buff *skb, int ipv, int cast_type)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);

	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;

	/* set third flags byte to the casting type */
	if (cast_type == RTN_MULTICAST)
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
	else if (cast_type == RTN_BROADCAST)
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
	else
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;

	hdr->hdr.l2.pkt_length = skb->len - QETH_HEADER_SIZE;
	/* VSWITCH relies on the VLAN
	 * information to be present in
	 * the QDIO header */
	if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
		hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
	}
}

static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "L2sdvcb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
			  "Continuing\n", cmd->data.setdelvlan.vlan_id,
			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
		QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
	}
	return 0;
}

static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
				enum qeth_ipa_cmds ipacmd)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setdelvlan.vlan_id = i;
	return qeth_send_ipa_cmd(card, iob,
				 qeth_l2_send_setdelvlan_cb, NULL);
}

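/*
 * Walk the list of registered VLAN ids and either register them all with
 * the card (clear == 0) or remove them all (clear != 0), e.g. when the
 * card is stopped.
 */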
static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
{
	struct qeth_vlan_vid *id;

	QETH_CARD_TEXT(card, 3, "L2prcvln");
	spin_lock_bh(&card->vlanlock);
	list_for_each_entry(id, &card->vid_list, list) {
		if (clear)
			qeth_l2_send_setdelvlan(card, id->vid,
				IPA_CMD_DELVLAN);
		else
			qeth_l2_send_setdelvlan(card, id->vid,
				IPA_CMD_SETVLAN);
	}
	spin_unlock_bh(&card->vlanlock);
}

static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_vlan_vid *id;

	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
	if (card->info.type == QETH_CARD_TYPE_OSM) {
		QETH_CARD_TEXT(card, 3, "aidOSM");
		return;
	}
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_CARD_TEXT(card, 3, "aidREC");
		return;
	}
	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
	if (id) {
		id->vid = vid;
		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
		spin_lock_bh(&card->vlanlock);
		list_add_tail(&id->list, &card->vid_list);
		spin_unlock_bh(&card->vlanlock);
	}
}

static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_vlan_vid *id, *tmpid = NULL;
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
	if (card->info.type == QETH_CARD_TYPE_OSM) {
		QETH_CARD_TEXT(card, 3, "kidOSM");
		return;
	}
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_CARD_TEXT(card, 3, "kidREC");
		return;
	}
	spin_lock_bh(&card->vlanlock);
	list_for_each_entry(id, &card->vid_list, list) {
		if (id->vid == vid) {
			list_del(&id->list);
			tmpid = id;
			break;
		}
	}
	spin_unlock_bh(&card->vlanlock);
	if (tmpid) {
		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
		kfree(tmpid);
	}
	qeth_l2_set_multicast_list(card->dev);
}

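/*
 * Take the card down step by step: close the interface, deregister the
 * MAC address, drop VLANs and multicast entries, clear the QDIO queues
 * and finally the command buffers, ending in CARD_STATE_DOWN.
 */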
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(SETUP, 2, "stopcard");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, 0, 1);
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_l2_stop(card->dev);
		} else {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
		if (!card->use_hard_stop ||
		    recovery_mode) {
			__u8 *mac = &card->dev->dev_addr[0];
			rc = qeth_l2_send_delmac(card, mac);
			QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
		qeth_l2_process_vlans(card, 1);
		if (!card->use_hard_stop ||
		    recovery_mode)
			qeth_l2_del_all_mc(card);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}

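/*
 * Unpack all frames from one inbound QDIO buffer, hand layer-2 frames to
 * the network stack via netif_rx() and OSN frames to the registered OSN
 * data callback; anything else is dropped.
 */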
static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
			    struct qeth_qdio_buffer *buf, int index)
{
	struct qdio_buffer_element *element;
	struct sk_buff *skb;
	struct qeth_hdr *hdr;
	int offset;
	unsigned int len;

	/* get first element of current buffer */
	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
	offset = 0;
	if (card->options.performance_stats)
		card->perf_stats.bufs_rec++;
	while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
				       &offset, &hdr))) {
		skb->dev = card->dev;
		/* is device UP ? */
		if (!(card->dev->flags & IFF_UP)) {
			dev_kfree_skb_any(skb);
			continue;
		}
		switch (hdr->hdr.l2.id) {
		case QETH_HEADER_TYPE_LAYER2:
			skb->pkt_type = PACKET_HOST;
			skb->protocol = eth_type_trans(skb, skb->dev);
			if (card->options.checksum_type == NO_CHECKSUMMING)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
			if (skb->protocol == htons(ETH_P_802_2))
				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
			len = skb->len;
			netif_rx(skb);
			break;
		case QETH_HEADER_TYPE_OSN:
			if (card->info.type == QETH_CARD_TYPE_OSN) {
				skb_push(skb, sizeof(struct qeth_hdr));
				skb_copy_to_linear_data(skb, hdr,
						sizeof(struct qeth_hdr));
				len = skb->len;
				card->osn_info.data_cb(skb);
				break;
			}
			/* else unknown */
		default:
			dev_kfree_skb_any(skb);
			QETH_CARD_TEXT(card, 3, "inbunkno");
			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
			continue;
		}
		card->stats.rx_packets++;
		card->stats.rx_bytes += len;
	}
}

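/*
 * Issue a SETVMAC/DELVMAC/SETGMAC/DELGMAC IPA command for the given MAC
 * address; the caller supplies the reply callback that interprets the
 * return code.
 */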
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
			   enum qeth_ipa_cmds ipacmd,
			   int (*reply_cb) (struct qeth_card *,
					    struct qeth_reply*,
					    unsigned long))
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "L2sdmac");
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
}

static int qeth_l2_send_setmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "L2Smaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
		switch (cmd->hdr.return_code) {
		case IPA_RC_L2_DUP_MAC:
		case IPA_RC_L2_DUP_LAYER3_MAC:
			dev_warn(&card->gdev->dev,
				"MAC address %pM already exists\n",
				card->dev->dev_addr);
			break;
		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
			dev_warn(&card->gdev->dev,
				"MAC address %pM is not authorized\n",
				card->dev->dev_addr);
			break;
		default:
			break;
		}
		cmd->hdr.return_code = -EIO;
	} else {
		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
		       OSA_ADDR_LEN);
		dev_info(&card->gdev->dev,
			"MAC address %pM successfully registered on device %s\n",
			card->dev->dev_addr, card->dev->name);
	}
	return 0;
}

static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
	QETH_CARD_TEXT(card, 2, "L2Setmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					  qeth_l2_send_setmac_cb);
}

static int qeth_l2_send_delmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "L2Dmaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
		cmd->hdr.return_code = -EIO;
		return 0;
	}
	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;

	return 0;
}

static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
	QETH_CARD_TEXT(card, 2, "L2Delmac");
	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
		return 0;
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
					  qeth_l2_send_delmac_cb);
}

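/*
 * Obtain the initial MAC address for the interface: query it from the
 * adapter for HiperSockets, OSM, OSX and guest LAN devices, otherwise
 * generate a random locally administered address with the 02:00:00 prefix.
 */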
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
	int rc = 0;
	char vendor_pre[] = {0x02, 0x00, 0x00};

	QETH_DBF_TEXT(SETUP, 2, "doL2init");
	QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));

	rc = qeth_query_setadapterparms(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
			"device %s: x%x\n", CARD_BUS_ID(card), rc);
	}

	if (card->info.type == QETH_CARD_TYPE_IQD ||
	    card->info.type == QETH_CARD_TYPE_OSM ||
	    card->info.type == QETH_CARD_TYPE_OSX ||
	    card->info.guestlan) {
		rc = qeth_setadpparms_change_macaddr(card);
		if (rc) {
			QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
				"device %s: x%x\n", CARD_BUS_ID(card), rc);
			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
			return rc;
		}
		QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
	} else {
		random_ether_addr(card->dev->dev_addr);
		memcpy(card->dev->dev_addr, vendor_pre, 3);
	}
	return 0;
}

static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "setmac");

	if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
		QETH_CARD_TEXT(card, 3, "setmcINV");
		return -EOPNOTSUPP;
	}

	if (card->info.type == QETH_CARD_TYPE_OSN ||
	    card->info.type == QETH_CARD_TYPE_OSM ||
	    card->info.type == QETH_CARD_TYPE_OSX) {
		QETH_CARD_TEXT(card, 3, "setmcTYP");
		return -EOPNOTSUPP;
	}
	QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
		QETH_CARD_TEXT(card, 3, "setmcREC");
		return -ERESTARTSYS;
	}
	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
	if (!rc)
		rc = qeth_l2_send_setmac(card, addr->sa_data);
	return rc;
}

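/*
 * Rebuild the card's multicast and secondary unicast address registrations
 * from the net_device address lists and, if the adapter supports it, update
 * the promiscuous mode setting.
 */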
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct netdev_hw_addr *ha;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return;
	QETH_CARD_TEXT(card, 3, "setmulti");
	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
	    (card->state != CARD_STATE_UP))
		return;
	qeth_l2_del_all_mc(card);
	spin_lock_bh(&card->mclock);
	netdev_for_each_mc_addr(ha, dev)
		qeth_l2_add_mc(card, ha->addr, 0);

	netdev_for_each_uc_addr(ha, dev)
		qeth_l2_add_mc(card, ha->addr, 1);

	spin_unlock_bh(&card->mclock);
	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
		return;
	qeth_setadp_promisc_mode(card);
}

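/*
 * Transmit path: build (or reuse) the qeth layer-2 header, pick the
 * outbound queue by priority, and hand the frame to the QDIO send helpers.
 * IQD (HiperSockets) devices keep the header in a separately allocated
 * element; all other types get it pushed into the skb headroom.
 */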
static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct qeth_hdr *hdr = NULL;
	int elements = 0;
	struct qeth_card *card = dev->ml_priv;
	struct sk_buff *new_skb = skb;
	int ipv = qeth_get_ip_version(skb);
	int cast_type = qeth_l2_get_cast_type(card, skb);
	struct qeth_qdio_out_q *queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
	int tx_bytes = skb->len;
	int data_offset = -1;
	int elements_needed = 0;
	int hd_len = 0;

	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
		card->stats.tx_carrier_errors++;
		goto tx_drop;
	}

	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
	    (skb->protocol == htons(ETH_P_IPV6)))
		goto tx_drop;

	if (card->options.performance_stats) {
		card->perf_stats.outbound_cnt++;
		card->perf_stats.outbound_start_time = qeth_get_micros();
	}
	netif_stop_queue(dev);

	if (card->info.type == QETH_CARD_TYPE_OSN)
		hdr = (struct qeth_hdr *)skb->data;
	else {
		if (card->info.type == QETH_CARD_TYPE_IQD) {
			new_skb = skb;
			data_offset = ETH_HLEN;
			hd_len = ETH_HLEN;
			hdr = kmem_cache_alloc(qeth_core_header_cache,
						GFP_ATOMIC);
			if (!hdr)
				goto tx_drop;
			elements_needed++;
			skb_reset_mac_header(new_skb);
			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
			hdr->hdr.l2.pkt_length = new_skb->len;
			memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
				skb_mac_header(new_skb), ETH_HLEN);
		} else {
			/* create a clone with writeable headroom */
			new_skb = skb_realloc_headroom(skb,
						sizeof(struct qeth_hdr));
			if (!new_skb)
				goto tx_drop;
			hdr = (struct qeth_hdr *)skb_push(new_skb,
						sizeof(struct qeth_hdr));
			skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
		}
	}

	elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
						elements_needed);
	if (!elements) {
		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);
		goto tx_drop;
	}

	if (card->info.type != QETH_CARD_TYPE_IQD) {
		if (qeth_hdr_chk_and_bounce(new_skb,
		    sizeof(struct qeth_hdr_layer2)))
			goto tx_drop;
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements);
	} else
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					elements, data_offset, hd_len);
	if (!rc) {
		card->stats.tx_packets++;
		card->stats.tx_bytes += tx_bytes;
		if (new_skb != skb)
			dev_kfree_skb_any(skb);
		rc = NETDEV_TX_OK;
	} else {
		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);

		if (rc == -EBUSY) {
			if (new_skb != skb)
				dev_kfree_skb_any(new_skb);
			return NETDEV_TX_BUSY;
		} else
			goto tx_drop;
	}

	netif_wake_queue(dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_time += qeth_get_micros() -
			card->perf_stats.outbound_start_time;
	return rc;

tx_drop:
	card->stats.tx_dropped++;
	card->stats.tx_errors++;
	if ((new_skb != skb) && new_skb)
		dev_kfree_skb_any(new_skb);
	dev_kfree_skb_any(skb);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}

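/*
 * QDIO inbound interrupt handler: walk the newly filled buffers, let
 * qeth_l2_process_inbound_buffer() extract the frames and requeue each
 * buffer for the hardware.  An activate-check condition triggers recovery.
 */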
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
			    unsigned int qdio_err, unsigned int queue,
			    int first_element, int count, unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;
	int index;
	int i;

	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}
	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		QETH_CARD_TEXT(card, 1, "qdinchk");
		QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
				count);
		QETH_CARD_TEXT_(card, 1, "%04X", queue);
		qeth_schedule_recovery(card);
		return;
	}
	for (i = first_element; i < (first_element + count); ++i) {
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		if (!(qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
					     "qinerr")))
			qeth_l2_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
}

static int qeth_l2_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");
	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
	     (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
		QETH_CARD_TEXT(card, 4, "nomacadr");
		return -EPERM;
	}
	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;
	netif_start_queue(dev);

	if (!card->lan_online && netif_carrier_ok(dev))
		netif_carrier_off(dev);
	return 0;
}

static int qeth_l2_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	netif_tx_disable(dev);
	if (card->state == CARD_STATE_UP)
		card->state = CARD_STATE_SOFTSETUP;
	return 0;
}

static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	INIT_LIST_HEAD(&card->vid_list);
	INIT_LIST_HEAD(&card->mc_list);
	card->options.layer2 = 1;
	card->discipline.input_handler = (qdio_handler_t *)
		qeth_l2_qdio_input_handler;
	card->discipline.output_handler = (qdio_handler_t *)
		qeth_qdio_output_handler;
	card->discipline.recover = qeth_l2_recover;
	return 0;
}

static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE) {
		card->use_hard_stop = 1;
		qeth_l2_set_offline(cgdev);
	}

	if (card->dev) {
		unregister_netdev(card->dev);
		card->dev = NULL;
	}

	qeth_l2_del_all_mc(card);
	return;
}

static const struct ethtool_ops qeth_l2_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_sset_count = qeth_core_get_sset_count,
	.get_drvinfo = qeth_core_get_drvinfo,
	.get_settings = qeth_core_ethtool_get_settings,
};

static const struct ethtool_ops qeth_l2_osn_ops = {
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_sset_count = qeth_core_get_sset_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};

static const struct net_device_ops qeth_l2_netdev_ops = {
	.ndo_open		= qeth_l2_open,
	.ndo_stop		= qeth_l2_stop,
	.ndo_get_stats		= qeth_get_stats,
	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= qeth_l2_set_multicast_list,
	.ndo_do_ioctl		= qeth_l2_do_ioctl,
	.ndo_set_mac_address	= qeth_l2_set_mac_address,
	.ndo_change_mtu		= qeth_change_mtu,
	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qeth_l2_vlan_rx_kill_vid,
	.ndo_tx_timeout		= qeth_tx_timeout,
};

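/*
 * Allocate and register the net_device for the card: "hsi%d" for
 * HiperSockets, "osn%d" for OSN, a plain ethernet device otherwise, and
 * wire up the netdev/ethtool operations and the initial MAC address.
 */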
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
		break;
	case QETH_CARD_TYPE_OSN:
		card->dev = alloc_netdev(0, "osn%d", ether_setup);
		card->dev->flags |= IFF_NOARP;
		break;
	default:
		card->dev = alloc_etherdev(0);
	}

	if (!card->dev)
		return -ENODEV;

	card->dev->ml_priv = card;
	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
	card->dev->mtu = card->info.initial_mtu;
	card->dev->netdev_ops = &qeth_l2_netdev_ops;
	if (card->info.type != QETH_CARD_TYPE_OSN)
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
	else
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
	card->dev->features |= NETIF_F_HW_VLAN_FILTER;
	card->info.broadcast_capable = 1;
	qeth_l2_request_initial_mac(card);
	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	return register_netdev(card->dev);
}

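/*
 * Bring the card online: hard setup, netdev creation on first use, MAC
 * registration, STARTLAN, VLAN/isolation setup and QDIO queue
 * initialization.  With recovery_mode set, the interface is reopened
 * directly instead of through dev_open().
 */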
static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_card_states recover_flag;

	BUG_ON(!card);
	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_DBF_TEXT(SETUP, 2, "setonlin");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	recover_flag = card->state;
	rc = qeth_core_hardsetup_card(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		rc = -ENODEV;
		goto out_remove;
	}

	if (!card->dev && qeth_l2_setup_netdev(card)) {
		rc = -ENODEV;
		goto out_remove;
	}

	if (card->info.type != QETH_CARD_TYPE_OSN)
		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);

	card->state = CARD_STATE_HARDSETUP;
	qeth_print_status_message(card);

	/* softsetup */
	QETH_DBF_TEXT(SETUP, 2, "softsetp");

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		if (rc == 0xe080) {
			dev_warn(&card->gdev->dev,
				"The LAN is offline\n");
			card->lan_online = 0;
			goto out;
		}
		rc = -ENODEV;
		goto out_remove;
	} else
		card->lan_online = 1;

	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
	    (card->info.type == QETH_CARD_TYPE_OSX))
		/* configure isolation level */
		qeth_set_access_ctrl_online(card);

	if (card->info.type != QETH_CARD_TYPE_OSN &&
	    card->info.type != QETH_CARD_TYPE_OSM)
		qeth_l2_process_vlans(card, 0);

	netif_tx_disable(card->dev);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
		rc = -ENODEV;
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;
	netif_carrier_on(card->dev);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (recover_flag == CARD_STATE_RECOVER) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_l2_open(card->dev);
		} else {
			rtnl_lock();
			dev_open(card->dev);
			rtnl_unlock();
		}
		/* this also sets saved unicast addresses */
		qeth_l2_set_multicast_list(card->dev);
	}
	/* let user_space know that device is online */
	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
out:
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;

out_remove:
	card->use_hard_stop = 1;
	qeth_l2_stop_card(card, 0);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	if (recover_flag == CARD_STATE_RECOVER)
		card->state = CARD_STATE_RECOVER;
	else
		card->state = CARD_STATE_DOWN;
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int qeth_l2_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_l2_set_online(gdev, 0);
}

static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
					int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;
	enum qeth_card_states recover_flag;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_DBF_TEXT(SETUP, 3, "setoffl");
	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
		netif_carrier_off(card->dev);
	recover_flag = card->state;
	qeth_l2_stop_card(card, recovery_mode);
	rc = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	/* let user_space know that device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
}

static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l2_set_offline(cgdev, 0);
}

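/*
 * Recovery thread: take the card offline with a hard stop and bring it
 * back online; on failure the interface is closed and a warning is logged.
 */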
static int qeth_l2_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		"A recovery process has been started for the device\n");
	card->use_hard_stop = 1;
	__qeth_l2_set_offline(card->gdev, 1);
	rc = __qeth_l2_set_online(card->gdev, 1);
	/* don't run another scheduled recovery */
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		dev_info(&card->gdev->dev,
			"Device successfully recovered!\n");
	else {
		rtnl_lock();
		dev_close(card->dev);
		rtnl_unlock();
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
	}
	return 0;
}

static int __init qeth_l2_init(void)
{
	pr_info("register layer 2 discipline\n");
	return 0;
}

static void __exit qeth_l2_exit(void)
{
	pr_info("unregister layer 2 discipline\n");
}

static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
}

static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	if (card->dev)
		netif_device_detach(card->dev);
	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;
	if (card->state == CARD_STATE_UP) {
		card->use_hard_stop = 1;
		__qeth_l2_set_offline(card->gdev, 1);
	} else
		__qeth_l2_set_offline(card->gdev, 0);
	return 0;
}

static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;

	if (gdev->state == CCWGROUP_OFFLINE)
		goto out;

	if (card->state == CARD_STATE_RECOVER) {
		rc = __qeth_l2_set_online(card->gdev, 1);
		if (rc) {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
	} else
		rc = __qeth_l2_set_online(card->gdev, 0);
out:
	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (card->dev)
		netif_device_attach(card->dev);
	if (rc)
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
	return rc;
}

struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
	.probe = qeth_l2_probe_device,
	.remove = qeth_l2_remove_device,
	.set_online = qeth_l2_set_online,
	.set_offline = qeth_l2_set_offline,
	.shutdown = qeth_l2_shutdown,
	.freeze = qeth_l2_pm_suspend,
	.thaw = qeth_l2_pm_resume,
	.restore = qeth_l2_pm_resume,
};
EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);

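/*
 * OSN support: send a prepared control-data buffer on the write channel,
 * waiting until no interrupt is pending there, and wrap user-supplied
 * command data in an OSN IPA PDU.
 */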
static int qeth_osn_send_control_data(struct qeth_card *card, int len,
			   struct qeth_cmd_buffer *iob)
{
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 5, "osndctrd");

	wait_event(card->wait_q,
		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
	qeth_prepare_control_data(card, len, iob);
	QETH_CARD_TEXT(card, 6, "osnoirqp");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
			   "ccw_device_start rc = %i\n", rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
	}
	return rc;
}

static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
			struct qeth_cmd_buffer *iob, int data_len)
{
	u16 s1, s2;

	QETH_CARD_TEXT(card, 4, "osndipa");

	qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
	s2 = (u16)data_len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_osn_send_control_data(card, s1, iob);
}

int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_card *card;
	int rc;

	if (!dev)
		return -ENODEV;
	card = dev->ml_priv;
	if (!card)
		return -ENODEV;
	QETH_CARD_TEXT(card, 2, "osnsdmc");
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;
	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data + IPA_PDU_HEADER_SIZE, data, data_len);
	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
	return rc;
}
EXPORT_SYMBOL(qeth_osn_assist);

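/*
 * Register an OSN exploiter for the card identified by the read-channel
 * device number and return its net_device.  Both callbacks are required.
 *
 * Illustrative usage only (my_assist_cb/my_data_cb are placeholder names
 * supplied by the exploiter module, not part of this driver):
 *
 *	struct net_device *ndev;
 *	int rc = qeth_osn_register(read_dev_no, &ndev,
 *				   my_assist_cb, my_data_cb);
 *	if (!rc)
 *		rc = qeth_osn_assist(ndev, cmd_data, cmd_len);
 */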
int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
		  int (*assist_cb)(struct net_device *, void *),
		  int (*data_cb)(struct sk_buff *))
{
	struct qeth_card *card;

	*dev = qeth_l2_netdev_by_devno(read_dev_no);
	if (*dev == NULL)
		return -ENODEV;
	card = (*dev)->ml_priv;
	if (!card)
		return -ENODEV;
	QETH_CARD_TEXT(card, 2, "osnreg");
	if ((assist_cb == NULL) || (data_cb == NULL))
		return -EINVAL;
	card->osn_info.assist_cb = assist_cb;
	card->osn_info.data_cb = data_cb;
	return 0;
}
EXPORT_SYMBOL(qeth_osn_register);

void qeth_osn_deregister(struct net_device *dev)
{
	struct qeth_card *card;

	if (!dev)
		return;
	card = dev->ml_priv;
	if (!card)
		return;
	QETH_CARD_TEXT(card, 2, "osndereg");
	card->osn_info.assist_cb = NULL;
	card->osn_info.data_cb = NULL;
	return;
}
EXPORT_SYMBOL(qeth_osn_deregister);

module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 2 discipline");
MODULE_LICENSE("GPL");