/*
 * $Id: netiucv.c,v 1.69 2006/01/12 14:33:09 cohuck Exp $
 *
 * IUCV network driver
 *
 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *
 * Sysfs integration and all bugs therein by Cornelia Huck
 * (cornelia.huck@de.ibm.com)
 *
 * Documentation used:
 *  the source of the original IUCV driver by:
 *   Stefan Hegewald <hegewald@de.ibm.com>
 *   Hartmut Penner <hpenner@de.ibm.com>
 *   Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *   Martin Schwidefsky (schwidefsky@de.ibm.com)
 *   Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * RELEASE-TAG: IUCV network driver $Revision: 1.69 $
 *
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include "iucv.h"
#include "fsm.h"
MODULE_AUTHOR
    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");

#define PRINTK_HEADER " iucv: "	/* for debugging */

/* Generic driver object; netiucv devices hang off the iucv bus. */
static struct device_driver netiucv_driver = {
	.name = "netiucv",
	.bus  = &iucv_bus,
};

/**
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;		/* high watermark of collect_len */
	unsigned long maxcqueue;	/* max skbs merged into one multi send */
	unsigned long doios_single;	/* single-skb iucv_send calls */
	unsigned long doios_multi;	/* multi-skb iucv_send calls */
	unsigned long txlen;		/* total bytes handed to iucv_send */
	unsigned long tx_time;		/* accumulated tx time — TODO confirm units (jiffies?) */
	struct timespec send_stamp;	/* xtime snapshot of the last send */
	unsigned long tx_pending;	/* sends not yet completed */
	unsigned long tx_max_pending;	/* high watermark of tx_pending */
};

/**
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct iucv_connection    *next;	/* link in iucv_connections list */
	iucv_handle_t             handle;	/* from iucv_register_program() */
	__u16                     pathid;	/* iucv path id of this connection */
	struct sk_buff            *rx_buff;	/* receive/reassembly buffer */
	struct sk_buff            *tx_buff;	/* staging buffer for multi sends */
	struct sk_buff_head       collect_queue; /* skbs waiting to be sent */
	struct sk_buff_head       commit_queue;	/* skbs sent, awaiting completion */
	spinlock_t                collect_lock;	/* protects collect_queue/collect_len */
	int                       collect_len;	/* bytes queued in collect_queue */
	int                       max_buffsize;	/* largest IUCV message accepted */
	fsm_timer                 timer;	/* connection setup timeout */
	fsm_instance              *fsm;		/* connection state machine */
	struct net_device         *netdev;	/* network device we belong to */
	struct connection_profile prof;		/* statistics, see above */
	char                      userid[9];	/* peer userid (see netiucv_printname) */
};

/**
 * Linked list of all connection structs.
 */
static struct iucv_connection *iucv_connections;

/**
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;	/* connection the event belongs to */
	void                   *data;	/* lowlevel iucv interrupt buffer */
};

/**
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;	/* interface statistics */
	unsigned long           tbusy;	/* bit 0: transmitter-busy flag */
	fsm_instance            *fsm;	/* device state machine */
	struct iucv_connection  *conn;	/* connection carrying the traffic */
	struct device           *dev;	/* sysfs device */
};

/**
 * Link level header for a packet.
 */
typedef struct ll_header_t {
	/* offset of the next header within the buffer; 0 ends the chain */
	__u16 next;
} ll_header;

#define NETIUCV_HDRLEN		(sizeof(ll_header))
#define NETIUCV_BUFSIZE_MAX	32768
#define NETIUCV_BUFSIZE_DEFAULT	NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX		(NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT	9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC	5000
/**
 * Compatibility macros for busy handling
 * of network devices.
 */

/* Clear the device's busy flag and restart its transmit queue. */
static __inline__ void netiucv_clear_busy(struct net_device *dev)
{
	clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy));
	netif_wake_queue(dev);
}
/*
 * Stop the device's transmit queue and set the busy flag.
 *
 * @returns non-zero if the flag was already set (transmitter busy).
 */
static __inline__ int netiucv_test_and_set_busy(struct net_device *dev)
{
	netif_stop_queue(dev);
	return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy);
}
/* All-zero host argument passed to iucv_connect() — NOTE(review): presumably
 * "any host"; confirm against the iucv layer's documentation. */
static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

/* 16-byte IUCV "magic" identifying netiucv connections (looks like EBCDIC
 * '0' followed by blanks — TODO confirm). */
static __u8 iucvMagic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};

/**
 * This mask means the 16-byte IUCV "magic" and the origin userid must
 * match exactly as specified in order to give connection_pending()
 * control.
 */
static __u8 netiucv_mask[] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
  167. /**
  168. * Convert an iucv userId to its printable
  169. * form (strip whitespace at end).
  170. *
  171. * @param An iucv userId
  172. *
  173. * @returns The printable string (static data!!)
  174. */
  175. static __inline__ char *
  176. netiucv_printname(char *name)
  177. {
  178. static char tmp[9];
  179. char *p = tmp;
  180. memcpy(tmp, name, 8);
  181. tmp[8] = '\0';
  182. while (*p && (!isspace(*p)))
  183. p++;
  184. *p = '\0';
  185. return tmp;
  186. }
/**
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT,
	DEV_STATE_STOPWAIT,
	DEV_STATE_RUNNING,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_STATES
};

/* Printable names of the device states, indexed by enum dev_states. */
static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};

/**
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_CONUP,
	DEV_EVENT_CONDOWN,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_EVENTS
};

/* Printable names of the device events, indexed by enum dev_events. */
static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};

/**
 * Events of the connection statemachine
 */
enum conn_events {
	/**
	 * Events, representing callbacks from
	 * lowlevel iucv layer)
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,
	/**
	 * Events, representing errors return codes from
	 * calls to lowlevel iucv layer
	 */
	/**
	 * Event, representing timer expiry.
	 */
	CONN_EVENT_TIMER,
	/**
	 * Events, representing commands from upper levels.
	 */
	CONN_EVENT_START,
	CONN_EVENT_STOP,
	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_EVENTS,
};

/* Printable names of the connection events, indexed by enum conn_events. */
static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",
	"Timer",
	"Start",
	"Stop",
};

/**
 * States of the connection statemachine.
 */
enum conn_states {
	/**
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,
	/**
	 * Userid assigned but not operating
	 */
	CONN_STATE_STOPPED,
	/**
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,
	/**
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,
	/**
	 * Connection up and running idle
	 */
	CONN_STATE_IDLE,
	/**
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */
	CONN_STATE_TX,
	/**
	 * Error during registration.
	 */
	CONN_STATE_REGERR,
	/**
	 * Error during connection setup.  (NOTE(review): the original
	 * comment duplicated "Error during registration." — presumed
	 * copy/paste; CONNERR is raised on connect failures.)
	 */
	CONN_STATE_CONNERR,
	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_STATES,
};
  315. static const char *conn_state_names[] = {
  316. "Invalid",
  317. "Stopped",
  318. "StartWait",
  319. "SetupWait",
  320. "Idle",
  321. "TX",
  322. "Terminating",
  323. "Registration error",
  324. "Connect error",
  325. };
/**
 * Debug Facility Stuff
 */
static debug_info_t *iucv_dbf_setup = NULL;	/* setup/config debug area */
static debug_info_t *iucv_dbf_data = NULL;	/* data path debug area */
static debug_info_t *iucv_dbf_trace = NULL;	/* function trace debug area */

/* Per-cpu scratch buffer used when formatting debug messages. */
DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
  333. static void
  334. iucv_unregister_dbf_views(void)
  335. {
  336. if (iucv_dbf_setup)
  337. debug_unregister(iucv_dbf_setup);
  338. if (iucv_dbf_data)
  339. debug_unregister(iucv_dbf_data);
  340. if (iucv_dbf_trace)
  341. debug_unregister(iucv_dbf_trace);
  342. }
/*
 * Allocate the three s390 debug feature areas (setup, data, trace),
 * attach a hex/ascii view to each and set its initial debug level.
 *
 * All three registrations are attempted before checking for failure so
 * the cleanup path only has to run once.
 *
 * @returns 0 on success, -ENOMEM if any area could not be registered
 *          (any partially registered areas are torn down again).
 */
static int
iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);
	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
	return 0;
}
  371. /**
  372. * Callback-wrappers, called from lowlevel iucv layer.
  373. *****************************************************************************/
  374. static void
  375. netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data)
  376. {
  377. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  378. struct iucv_event ev;
  379. ev.conn = conn;
  380. ev.data = (void *)eib;
  381. fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
  382. }
  383. static void
  384. netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data)
  385. {
  386. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  387. struct iucv_event ev;
  388. ev.conn = conn;
  389. ev.data = (void *)eib;
  390. fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
  391. }
  392. static void
  393. netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
  394. {
  395. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  396. struct iucv_event ev;
  397. ev.conn = conn;
  398. ev.data = (void *)eib;
  399. fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
  400. }
  401. static void
  402. netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data)
  403. {
  404. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  405. struct iucv_event ev;
  406. ev.conn = conn;
  407. ev.data = (void *)eib;
  408. fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
  409. }
  410. static void
  411. netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
  412. {
  413. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  414. struct iucv_event ev;
  415. ev.conn = conn;
  416. ev.data = (void *)eib;
  417. fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
  418. }
  419. static void
  420. netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
  421. {
  422. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  423. struct iucv_event ev;
  424. ev.conn = conn;
  425. ev.data = (void *)eib;
  426. fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
  427. }
  428. static void
  429. netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
  430. {
  431. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  432. struct iucv_event ev;
  433. ev.conn = conn;
  434. ev.data = (void *)eib;
  435. fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
  436. }
/* Dispatch table handed to the lowlevel iucv layer (via
 * iucv_register_program); routes interrupts to the wrappers above. */
static iucv_interrupt_ops_t netiucv_ops = {
	.ConnectionPending  = netiucv_callback_connreq,
	.ConnectionComplete = netiucv_callback_connack,
	.ConnectionSevered  = netiucv_callback_connrej,
	.ConnectionQuiesced = netiucv_callback_connsusp,
	.ConnectionResumed  = netiucv_callback_connres,
	.MessagePending     = netiucv_callback_rx,
	.MessageComplete    = netiucv_callback_txdone
};
/**
 * Dummy NOP action for all statemachines
 * (used for state/event combinations that require no work).
 */
static void
fsm_action_nop(fsm_instance *fi, int event, void *arg)
{
}
  453. /**
  454. * Actions of the connection statemachine
  455. *****************************************************************************/
  456. /**
  457. * Helper function for conn_action_rx()
  458. * Unpack a just received skb and hand it over to
  459. * upper layers.
  460. *
  461. * @param conn The connection where this skb has been received.
  462. * @param pskb The received skb.
  463. */
  464. //static __inline__ void
  465. static void
  466. netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
  467. {
  468. struct net_device *dev = conn->netdev;
  469. struct netiucv_priv *privptr = dev->priv;
  470. __u16 offset = 0;
  471. skb_put(pskb, NETIUCV_HDRLEN);
  472. pskb->dev = dev;
  473. pskb->ip_summed = CHECKSUM_NONE;
  474. pskb->protocol = ntohs(ETH_P_IP);
  475. while (1) {
  476. struct sk_buff *skb;
  477. ll_header *header = (ll_header *)pskb->data;
  478. if (!header->next)
  479. break;
  480. skb_pull(pskb, NETIUCV_HDRLEN);
  481. header->next -= offset;
  482. offset += header->next;
  483. header->next -= NETIUCV_HDRLEN;
  484. if (skb_tailroom(pskb) < header->next) {
  485. PRINT_WARN("%s: Illegal next field in iucv header: "
  486. "%d > %d\n",
  487. dev->name, header->next, skb_tailroom(pskb));
  488. IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
  489. header->next, skb_tailroom(pskb));
  490. return;
  491. }
  492. skb_put(pskb, header->next);
  493. pskb->mac.raw = pskb->data;
  494. skb = dev_alloc_skb(pskb->len);
  495. if (!skb) {
  496. PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
  497. dev->name);
  498. IUCV_DBF_TEXT(data, 2,
  499. "Out of memory in netiucv_unpack_skb\n");
  500. privptr->stats.rx_dropped++;
  501. return;
  502. }
  503. memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
  504. skb->mac.raw = skb->data;
  505. skb->dev = pskb->dev;
  506. skb->protocol = pskb->protocol;
  507. pskb->ip_summed = CHECKSUM_UNNECESSARY;
  508. /*
  509. * Since receiving is always initiated from a tasklet (in iucv.c),
  510. * we must use netif_rx_ni() instead of netif_rx()
  511. */
  512. netif_rx_ni(skb);
  513. dev->last_rx = jiffies;
  514. privptr->stats.rx_packets++;
  515. privptr->stats.rx_bytes += skb->len;
  516. skb_pull(pskb, header->next);
  517. skb_put(pskb, NETIUCV_HDRLEN);
  518. }
  519. }
/*
 * Handle a message-pending interrupt: pull the message into the
 * connection's rx_buff and hand it to netiucv_unpack_skb().
 *
 * @param fi    The connection state machine instance.
 * @param event The event id (CONN_EVENT_RX).
 * @param arg   Pointer to a struct iucv_event carrying the
 *              iucv_MessagePending interrupt buffer.
 */
static void
conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = (struct iucv_event *)arg;
	struct iucv_connection *conn = ev->conn;
	iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
	struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
	/* length of the pending message as reported by the interrupt */
	__u32 msglen = eib->ln1msg2.ipbfln1f;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
	if (!conn->netdev) {
		/* FRITZ: How to tell iucv LL to drop the msg? */
		PRINT_WARN("Received data for unlinked connection\n");
		IUCV_DBF_TEXT(data, 2,
			"Received data for unlinked connection\n");
		return;
	}
	if (msglen > conn->max_buffsize) {
		/* Message does not fit our receive buffer; count and drop. */
		/* FRITZ: How to tell iucv LL to drop the msg? */
		privptr->stats.rx_dropped++;
		PRINT_WARN("msglen %d > max_buffsize %d\n",
			msglen, conn->max_buffsize);
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			msglen, conn->max_buffsize);
		return;
	}
	/* Reset the reassembly buffer, then receive the message into it. */
	conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
	conn->rx_buff->len = 0;
	rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
			  conn->rx_buff->data, msglen, NULL, NULL, NULL);
	/* msglen < 5: too short for a header chain — presumably
	 * hdr (2) + minimal payload + terminating hdr; TODO confirm. */
	if (rc || msglen < 5) {
		privptr->stats.rx_errors++;
		PRINT_WARN("iucv_receive returned %08x\n", rc);
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
  558. static void
  559. conn_action_txdone(fsm_instance *fi, int event, void *arg)
  560. {
  561. struct iucv_event *ev = (struct iucv_event *)arg;
  562. struct iucv_connection *conn = ev->conn;
  563. iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data;
  564. struct netiucv_priv *privptr = NULL;
  565. /* Shut up, gcc! skb is always below 2G. */
  566. __u32 single_flag = eib->ipmsgtag;
  567. __u32 txbytes = 0;
  568. __u32 txpackets = 0;
  569. __u32 stat_maxcq = 0;
  570. struct sk_buff *skb;
  571. unsigned long saveflags;
  572. ll_header header;
  573. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  574. if (conn && conn->netdev && conn->netdev->priv)
  575. privptr = (struct netiucv_priv *)conn->netdev->priv;
  576. conn->prof.tx_pending--;
  577. if (single_flag) {
  578. if ((skb = skb_dequeue(&conn->commit_queue))) {
  579. atomic_dec(&skb->users);
  580. dev_kfree_skb_any(skb);
  581. if (privptr) {
  582. privptr->stats.tx_packets++;
  583. privptr->stats.tx_bytes +=
  584. (skb->len - NETIUCV_HDRLEN
  585. - NETIUCV_HDRLEN);
  586. }
  587. }
  588. }
  589. conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
  590. conn->tx_buff->len = 0;
  591. spin_lock_irqsave(&conn->collect_lock, saveflags);
  592. while ((skb = skb_dequeue(&conn->collect_queue))) {
  593. header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
  594. memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
  595. NETIUCV_HDRLEN);
  596. memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
  597. txbytes += skb->len;
  598. txpackets++;
  599. stat_maxcq++;
  600. atomic_dec(&skb->users);
  601. dev_kfree_skb_any(skb);
  602. }
  603. if (conn->collect_len > conn->prof.maxmulti)
  604. conn->prof.maxmulti = conn->collect_len;
  605. conn->collect_len = 0;
  606. spin_unlock_irqrestore(&conn->collect_lock, saveflags);
  607. if (conn->tx_buff->len) {
  608. int rc;
  609. header.next = 0;
  610. memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
  611. NETIUCV_HDRLEN);
  612. conn->prof.send_stamp = xtime;
  613. rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
  614. conn->tx_buff->data, conn->tx_buff->len);
  615. conn->prof.doios_multi++;
  616. conn->prof.txlen += conn->tx_buff->len;
  617. conn->prof.tx_pending++;
  618. if (conn->prof.tx_pending > conn->prof.tx_max_pending)
  619. conn->prof.tx_max_pending = conn->prof.tx_pending;
  620. if (rc) {
  621. conn->prof.tx_pending--;
  622. fsm_newstate(fi, CONN_STATE_IDLE);
  623. if (privptr)
  624. privptr->stats.tx_errors += txpackets;
  625. PRINT_WARN("iucv_send returned %08x\n", rc);
  626. IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
  627. } else {
  628. if (privptr) {
  629. privptr->stats.tx_packets += txpackets;
  630. privptr->stats.tx_bytes += txbytes;
  631. }
  632. if (stat_maxcq > conn->prof.maxcqueue)
  633. conn->prof.maxcqueue = stat_maxcq;
  634. }
  635. } else
  636. fsm_newstate(fi, CONN_STATE_IDLE);
  637. }
/*
 * Accept an incoming connection request and bring the connection up.
 *
 * @param fi    The connection state machine instance.
 * @param event The event id (CONN_EVENT_CONN_REQ).
 * @param arg   Pointer to a struct iucv_event carrying the
 *              iucv_ConnectionPending interrupt buffer.
 */
static void
conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = (struct iucv_event *)arg;
	struct iucv_connection *conn = ev->conn;
	iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
	int rc;
	__u16 msglimit;
	__u8 udata[16];

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
			 conn->handle, conn, NULL, &msglimit);
	if (rc) {
		PRINT_WARN("%s: IUCV accept failed with error %d\n",
		       netdev->name, rc);
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	conn->pathid = eib->ippathid;
	/* cap the device queue at the peer's iucv message limit */
	netdev->tx_queue_len = msglimit;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
/*
 * Reject an incoming connection request by severing the offered path.
 * If the interrupt's pathid unexpectedly differs from the connection's
 * own pathid, sever the connection's path as well.
 *
 * @param fi    The connection state machine instance.
 * @param event The event id.
 * @param arg   Pointer to a struct iucv_event carrying the
 *              iucv_ConnectionPending interrupt buffer.
 */
static void
conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = (struct iucv_event *)arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
	__u8 udata[16];

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	iucv_sever(eib->ippathid, udata);
	if (eib->ippathid != conn->pathid) {
		PRINT_INFO("%s: IR Connection Pending; "
			"pathid %d does not match original pathid %d\n",
			netdev->name, eib->ippathid, conn->pathid);
		IUCV_DBF_TEXT_(data, 2,
			"connreject: IR pathid %d, conn. pathid %d\n",
			eib->ippathid, conn->pathid);
		iucv_sever(conn->pathid, udata);
	}
}
/*
 * The peer acknowledged our pending connect: stop the setup timer,
 * go to IDLE and notify the device state machine.  A mismatching
 * pathid in the interrupt is logged and adopted.
 *
 * @param fi    The connection state machine instance.
 * @param event The event id (CONN_EVENT_CONN_ACK).
 * @param arg   Pointer to a struct iucv_event carrying the
 *              iucv_ConnectionComplete interrupt buffer.
 */
static void
conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = (struct iucv_event *)arg;
	struct iucv_connection *conn = ev->conn;
	iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	if (eib->ippathid != conn->pathid) {
		PRINT_INFO("%s: IR Connection Complete; "
			"pathid %d does not match original pathid %d\n",
			netdev->name, eib->ippathid, conn->pathid);
		IUCV_DBF_TEXT_(data, 2,
			"connack: IR pathid %d, conn. pathid %d\n",
			eib->ippathid, conn->pathid);
		conn->pathid = eib->ippathid;
	}
	/* cap the device queue at the peer's iucv message limit */
	netdev->tx_queue_len = eib->ipmsglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
/*
 * Connection setup timed out: sever the path and fall back to
 * STARTWAIT.
 *
 * @param fi    The connection state machine instance.
 * @param event The event id (CONN_EVENT_TIMER).
 * @param arg   The connection itself — timer events pass the
 *              connection directly, not a struct iucv_event.
 */
static void
conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = (struct iucv_connection *)arg;
	__u8 udata[16];

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&conn->timer);
	iucv_sever(conn->pathid, udata);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
/*
 * The peer severed the connection: drop our end of the path, go back
 * to STARTWAIT and tell the device state machine the link is down.
 *
 * @param fi    The connection state machine instance.
 * @param event The event id (CONN_EVENT_CONN_REJ).
 * @param arg   Pointer to a struct iucv_event.
 */
static void
conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = (struct iucv_event *)arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
	__u8 udata[16];

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&conn->timer);
	iucv_sever(conn->pathid, udata);
	PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
	IUCV_DBF_TEXT(data, 2,
		"conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
/*
 * Start a connection: register this driver with IUCV on first use, then
 * issue the connection request to the configured peer. Transient
 * failures (peer absent / not ready) leave the FSM in
 * CONN_STATE_STARTWAIT for a later retry; fatal failures end in
 * CONN_STATE_REGERR or CONN_STATE_CONNERR.
 *
 * @param fi    The connection statemachine instance.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct iucv_event * upon call.
 */
static void
conn_action_start(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = (struct iucv_event *)arg;
	struct iucv_connection *conn = ev->conn;
	__u16 msglimit;
	int rc;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	if (!conn->handle) {
		/* First start on this connection: register with IUCV. */
		IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
		conn->handle =
			iucv_register_program(iucvMagic, conn->userid,
					      netiucv_mask,
					      &netiucv_ops, conn);
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		if (!conn->handle) {
			fsm_newstate(fi, CONN_STATE_REGERR);
			conn->handle = NULL;
			IUCV_DBF_TEXT(setup, 2,
				      "NULL from iucv_register_program\n");
			return;
		}
		PRINT_DEBUG("%s('%s'): registered successfully\n",
			    conn->netdev->name, conn->userid);
	}
	PRINT_DEBUG("%s('%s'): connecting ...\n",
		    conn->netdev->name, conn->userid);
	/* We must set the state before calling iucv_connect because the callback
	 * handler could be called at any point after the connection request is
	 * sent */
	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
	rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
			  conn->userid, iucv_host, 0, NULL, &msglimit,
			  conn->handle, conn);
	/* The numeric cases below are IUCV CONNECT return codes; the
	 * meaning of each is given by its message text. */
	switch (rc) {
	case 0:
		/* Request sent; answer is awaited under a 5s timer. */
		conn->netdev->tx_queue_len = msglimit;
		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
			     CONN_EVENT_TIMER, conn);
		return;
	case 11:
		/* Transient: stay in STARTWAIT so a retry is possible. */
		PRINT_INFO("%s: User %s is currently not available.\n",
			   conn->netdev->name,
			   netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		return;
	case 12:
		/* Transient: stay in STARTWAIT so a retry is possible. */
		PRINT_INFO("%s: User %s is currently not ready.\n",
			   conn->netdev->name,
			   netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		return;
	case 13:
		/* Fatal: fall through to unregister below. */
		PRINT_WARN("%s: Too many IUCV connections.\n",
			   conn->netdev->name);
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 14:
		PRINT_WARN(
			"%s: User %s has too many IUCV connections.\n",
			conn->netdev->name,
			netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 15:
		PRINT_WARN(
			"%s: No IUCV authorization in CP directory.\n",
			conn->netdev->name);
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	default:
		PRINT_WARN("%s: iucv_connect returned error %d\n",
			   conn->netdev->name, rc);
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	}
	/* Fatal error path: drop the IUCV registration again. */
	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
	IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
	iucv_unregister_program(conn->handle);
	conn->handle = NULL;
}
  814. static void
  815. netiucv_purge_skb_queue(struct sk_buff_head *q)
  816. {
  817. struct sk_buff *skb;
  818. while ((skb = skb_dequeue(q))) {
  819. atomic_dec(&skb->users);
  820. dev_kfree_skb_any(skb);
  821. }
  822. }
  823. static void
  824. conn_action_stop(fsm_instance *fi, int event, void *arg)
  825. {
  826. struct iucv_event *ev = (struct iucv_event *)arg;
  827. struct iucv_connection *conn = ev->conn;
  828. struct net_device *netdev = conn->netdev;
  829. struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
  830. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  831. fsm_deltimer(&conn->timer);
  832. fsm_newstate(fi, CONN_STATE_STOPPED);
  833. netiucv_purge_skb_queue(&conn->collect_queue);
  834. if (conn->handle)
  835. IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
  836. iucv_unregister_program(conn->handle);
  837. conn->handle = NULL;
  838. netiucv_purge_skb_queue(&conn->commit_queue);
  839. fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
  840. }
  841. static void
  842. conn_action_inval(fsm_instance *fi, int event, void *arg)
  843. {
  844. struct iucv_event *ev = (struct iucv_event *)arg;
  845. struct iucv_connection *conn = ev->conn;
  846. struct net_device *netdev = conn->netdev;
  847. PRINT_WARN("%s: Cannot connect without username\n",
  848. netdev->name);
  849. IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
  850. }
/*
 * Connection statemachine transition table: each entry maps a
 * (state, event) pair to its action handler. Pairs not listed here
 * are not handled.
 */
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
	{ CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
	{ CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
	{ CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
	{ CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
	{ CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
	{ CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
	/* Incoming connection requests are only accepted while we are
	 * waiting to set up our own side; otherwise they are rejected. */
	{ CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
	{ CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
	{ CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
	{ CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
	{ CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
	{ CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
	{ CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
};

/* Number of entries in conn_fsm, as required by init_fsm(). */
static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
  877. /**
  878. * Actions for interface - statemachine.
  879. *****************************************************************************/
  880. /**
  881. * Startup connection by sending CONN_EVENT_START to it.
  882. *
  883. * @param fi An instance of an interface statemachine.
  884. * @param event The event, just happened.
  885. * @param arg Generic pointer, casted from struct net_device * upon call.
  886. */
  887. static void
  888. dev_action_start(fsm_instance *fi, int event, void *arg)
  889. {
  890. struct net_device *dev = (struct net_device *)arg;
  891. struct netiucv_priv *privptr = dev->priv;
  892. struct iucv_event ev;
  893. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  894. ev.conn = privptr->conn;
  895. fsm_newstate(fi, DEV_STATE_STARTWAIT);
  896. fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev);
  897. }
  898. /**
  899. * Shutdown connection by sending CONN_EVENT_STOP to it.
  900. *
  901. * @param fi An instance of an interface statemachine.
  902. * @param event The event, just happened.
  903. * @param arg Generic pointer, casted from struct net_device * upon call.
  904. */
  905. static void
  906. dev_action_stop(fsm_instance *fi, int event, void *arg)
  907. {
  908. struct net_device *dev = (struct net_device *)arg;
  909. struct netiucv_priv *privptr = dev->priv;
  910. struct iucv_event ev;
  911. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  912. ev.conn = privptr->conn;
  913. fsm_newstate(fi, DEV_STATE_STOPWAIT);
  914. fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
  915. }
  916. /**
  917. * Called from connection statemachine
  918. * when a connection is up and running.
  919. *
  920. * @param fi An instance of an interface statemachine.
  921. * @param event The event, just happened.
  922. * @param arg Generic pointer, casted from struct net_device * upon call.
  923. */
  924. static void
  925. dev_action_connup(fsm_instance *fi, int event, void *arg)
  926. {
  927. struct net_device *dev = (struct net_device *)arg;
  928. struct netiucv_priv *privptr = dev->priv;
  929. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  930. switch (fsm_getstate(fi)) {
  931. case DEV_STATE_STARTWAIT:
  932. fsm_newstate(fi, DEV_STATE_RUNNING);
  933. PRINT_INFO("%s: connected with remote side %s\n",
  934. dev->name, privptr->conn->userid);
  935. IUCV_DBF_TEXT(setup, 3,
  936. "connection is up and running\n");
  937. break;
  938. case DEV_STATE_STOPWAIT:
  939. PRINT_INFO(
  940. "%s: got connection UP event during shutdown!\n",
  941. dev->name);
  942. IUCV_DBF_TEXT(data, 2,
  943. "dev_action_connup: in DEV_STATE_STOPWAIT\n");
  944. break;
  945. }
  946. }
  947. /**
  948. * Called from connection statemachine
  949. * when a connection has been shutdown.
  950. *
  951. * @param fi An instance of an interface statemachine.
  952. * @param event The event, just happened.
  953. * @param arg Generic pointer, casted from struct net_device * upon call.
  954. */
  955. static void
  956. dev_action_conndown(fsm_instance *fi, int event, void *arg)
  957. {
  958. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  959. switch (fsm_getstate(fi)) {
  960. case DEV_STATE_RUNNING:
  961. fsm_newstate(fi, DEV_STATE_STARTWAIT);
  962. break;
  963. case DEV_STATE_STOPWAIT:
  964. fsm_newstate(fi, DEV_STATE_STOPPED);
  965. IUCV_DBF_TEXT(setup, 3, "connection is down\n");
  966. break;
  967. }
  968. }
/*
 * Interface statemachine transition table: maps (state, event) pairs
 * of the net_device level FSM to action handlers.
 */
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
	{ DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
	/* CONUP while already running is deliberately ignored. */
	{ DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
};

/* Number of entries in dev_fsm, as required by init_fsm(). */
static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
/**
 * Transmit a packet.
 * This is a helper function for netiucv_tx().
 *
 * If the connection is busy (not CONN_STATE_IDLE) the buffer is parked
 * on the collect queue for later transmission; otherwise link-level
 * headers are added and the buffer is handed to iucv_send().
 *
 * @param conn Connection to be used for sending.
 * @param skb Pointer to struct sk_buff of packet to send.
 * The linklevel header has already been set up
 * by netiucv_tx().
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int
netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
	unsigned long saveflags;
	ll_header header;
	int rc = 0;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		/* Connection busy: queue the buffer for a later flush,
		 * unless the collect queue would exceed the buffer size. */
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			rc = -EBUSY;
			IUCV_DBF_TEXT(data, 2,
				"EBUSY from netiucv_transmit_skb\n");
		} else {
			/* Keep a reference while the skb sits on the
			 * collect queue. */
			atomic_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/**
		 * Copy the skb to a new allocated skb in lowmem only if the
		 * data is located above 2G in memory or tailroom is < 2.
		 */
		unsigned long hi =
			((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31;
		int copied = 0;
		if (hi || (skb_tailroom(skb) < 2)) {
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				PRINT_WARN("%s: Could not allocate tx_skb\n",
				       conn->netdev->name);
				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
				rc = -ENOMEM;
				return rc;
			} else {
				/* Reserve headroom for the header pushed
				 * below, then copy the payload over. */
				skb_reserve(nskb, NETIUCV_HDRLEN);
				memcpy(skb_put(nskb, skb->len),
				       skb->data, skb->len);
			}
			copied = 1;
		}
		/**
		 * skb now is below 2G and has enough room. Add headers.
		 */
		/* Leading header carries the offset of the next block;
		 * a trailing zero header terminates the buffer. */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		header.next = 0;
		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);

		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = xtime;

		rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */,
			0, nskb->data, nskb->len);
		       /* Shut up, gcc! nskb is always below 2G. */
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			/* Send failed: roll back state and statistics. */
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = (struct netiucv_priv *)conn->netdev->priv;
			if (privptr)
				privptr->stats.tx_errors++;
			if (copied)
				dev_kfree_skb(nskb);
			else {
				/**
				 * Remove our headers. They get added
				 * again on retransmit.
				 */
				skb_pull(skb, NETIUCV_HDRLEN);
				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			}
			PRINT_WARN("iucv_send returned %08x\n", rc);
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			/* Success: the original skb is ours to free if we
			 * copied it; nskb stays referenced on the commit
			 * queue until TXDONE. */
			if (copied)
				dev_kfree_skb(skb);
			atomic_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}

	return rc;
}
  1080. /**
  1081. * Interface API for upper network layers
  1082. *****************************************************************************/
  1083. /**
  1084. * Open an interface.
  1085. * Called from generic network layer when ifconfig up is run.
  1086. *
  1087. * @param dev Pointer to interface struct.
  1088. *
  1089. * @return 0 on success, -ERRNO on failure. (Never fails.)
  1090. */
  1091. static int
  1092. netiucv_open(struct net_device *dev) {
  1093. fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
  1094. return 0;
  1095. }
  1096. /**
  1097. * Close an interface.
  1098. * Called from generic network layer when ifconfig down is run.
  1099. *
  1100. * @param dev Pointer to interface struct.
  1101. *
  1102. * @return 0 on success, -ERRNO on failure. (Never fails.)
  1103. */
  1104. static int
  1105. netiucv_close(struct net_device *dev) {
  1106. fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);
  1107. return 0;
  1108. }
  1109. /**
  1110. * Start transmission of a packet.
  1111. * Called from generic network device layer.
  1112. *
  1113. * @param skb Pointer to buffer containing the packet.
  1114. * @param dev Pointer to interface struct.
  1115. *
  1116. * @return 0 if packet consumed, !0 if packet rejected.
  1117. * Note: If we return !0, then the packet is free'd by
  1118. * the generic network layer.
  1119. */
  1120. static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
  1121. {
  1122. int rc = 0;
  1123. struct netiucv_priv *privptr = dev->priv;
  1124. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1125. /**
  1126. * Some sanity checks ...
  1127. */
  1128. if (skb == NULL) {
  1129. PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
  1130. IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
  1131. privptr->stats.tx_dropped++;
  1132. return 0;
  1133. }
  1134. if (skb_headroom(skb) < NETIUCV_HDRLEN) {
  1135. PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
  1136. dev->name, NETIUCV_HDRLEN);
  1137. IUCV_DBF_TEXT(data, 2,
  1138. "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
  1139. dev_kfree_skb(skb);
  1140. privptr->stats.tx_dropped++;
  1141. return 0;
  1142. }
  1143. /**
  1144. * If connection is not running, try to restart it
  1145. * and throw away packet.
  1146. */
  1147. if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
  1148. fsm_event(privptr->fsm, DEV_EVENT_START, dev);
  1149. dev_kfree_skb(skb);
  1150. privptr->stats.tx_dropped++;
  1151. privptr->stats.tx_errors++;
  1152. privptr->stats.tx_carrier_errors++;
  1153. return 0;
  1154. }
  1155. if (netiucv_test_and_set_busy(dev)) {
  1156. IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
  1157. return -EBUSY;
  1158. }
  1159. dev->trans_start = jiffies;
  1160. if (netiucv_transmit_skb(privptr->conn, skb))
  1161. rc = 1;
  1162. netiucv_clear_busy(dev);
  1163. return rc;
  1164. }
  1165. /**
  1166. * Returns interface statistics of a device.
  1167. *
  1168. * @param dev Pointer to interface struct.
  1169. *
  1170. * @return Pointer to stats struct of this interface.
  1171. */
  1172. static struct net_device_stats *
  1173. netiucv_stats (struct net_device * dev)
  1174. {
  1175. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1176. return &((struct netiucv_priv *)dev->priv)->stats;
  1177. }
  1178. /**
  1179. * Sets MTU of an interface.
  1180. *
  1181. * @param dev Pointer to interface struct.
  1182. * @param new_mtu The new MTU to use for this interface.
  1183. *
  1184. * @return 0 on success, -EINVAL if MTU is out of valid range.
  1185. * (valid range is 576 .. NETIUCV_MTU_MAX).
  1186. */
  1187. static int
  1188. netiucv_change_mtu (struct net_device * dev, int new_mtu)
  1189. {
  1190. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1191. if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
  1192. IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
  1193. return -EINVAL;
  1194. }
  1195. dev->mtu = new_mtu;
  1196. return 0;
  1197. }
  1198. /**
  1199. * attributes in sysfs
  1200. *****************************************************************************/
  1201. static ssize_t
  1202. user_show (struct device *dev, struct device_attribute *attr, char *buf)
  1203. {
  1204. struct netiucv_priv *priv = dev->driver_data;
  1205. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1206. return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
  1207. }
/*
 * sysfs 'user' attribute: set the peer userid of this connection.
 * Accepts up to 8 alphanumeric (or '$') characters, optionally followed
 * by a newline; the stored userid is blank-padded. Rejects the update
 * while the interface is up or running.
 */
static ssize_t
user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char *p;
	char *tmp;
	char username[10];
	int i;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* 8 name characters plus an optional trailing newline at most. */
	if (count>9) {
		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
		IUCV_DBF_TEXT_(setup, 2,
			"%d is length of username\n", (int)count);
		return -EINVAL;
	}
	/* NOTE(review): strsep modifies the buffer it is given; the cast
	 * drops the const qualifier of buf here. */
	tmp = strsep((char **) &buf, "\n");
	/* Copy and validate up to 8 characters of the username. */
	for (i=0, p=tmp; i<8 && *p; i++, p++) {
		if (isalnum(*p) || (*p == '$'))
			username[i]= *p;
		else if (*p == '\n') {
			/* trailing lf, grr */
			break;
		} else {
			PRINT_WARN("netiucv: Invalid char %c in username!\n",
				*p);
			IUCV_DBF_TEXT_(setup, 2,
				"username: invalid character %c\n",
				*p);
			return -EINVAL;
		}
	}
	/* Blank-pad to the fixed userid width and terminate. */
	while (i<9)
		username[i++] = ' ';
	username[9] = '\0';
	if (memcmp(username, priv->conn->userid, 8)) {
		/* username changed */
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			/* Refuse to change the peer of an active device. */
			PRINT_WARN(
				"netiucv: device %s active, connected to %s\n",
				dev->bus_id, priv->conn->userid);
			PRINT_WARN("netiucv: user cannot be updated\n");
			IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
			return -EBUSY;
		}
	}
	memcpy(priv->conn->userid, username, 9);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
  1258. static ssize_t
  1259. buffer_show (struct device *dev, struct device_attribute *attr, char *buf)
  1260. {
  1261. struct netiucv_priv *priv = dev->driver_data;
  1262. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1263. return sprintf(buf, "%d\n", priv->conn->max_buffsize);
  1264. }
/*
 * sysfs 'buffer' attribute: set the maximum buffer size. The value must
 * fit the current MTU (when the device is running) and leave room for
 * the minimum MTU of 576 plus two link-level headers. When the device
 * is not running, the MTU is adjusted to the largest value the new
 * buffer size allows.
 */
static ssize_t
buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char *e;
	int bs1;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	if (count >= 39)
		return -EINVAL;

	bs1 = simple_strtoul(buf, &e, 0);

	/* NOTE(review): e always points at the first unparsed character,
	 * so a value with no trailing whitespace/newline (e.g. "1234\0")
	 * is rejected here because '\0' is not isspace() — confirm this
	 * is the intended behavior. */
	if (e && (!isspace(*e))) {
		PRINT_WARN("netiucv: Invalid character in buffer!\n");
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		PRINT_WARN("netiucv: Given buffer size %d too large.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	/* A running device needs room for MTU + header + 2. */
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		PRINT_WARN("netiucv: Given buffer size %d too small.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	/* Absolute minimum: smallest legal MTU plus two headers. */
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		PRINT_WARN("netiucv: Given buffer size %d too small.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;
}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
  1312. static ssize_t
  1313. dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
  1314. {
  1315. struct netiucv_priv *priv = dev->driver_data;
  1316. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1317. return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
  1318. }
  1319. static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
  1320. static ssize_t
  1321. conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
  1322. {
  1323. struct netiucv_priv *priv = dev->driver_data;
  1324. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1325. return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
  1326. }
  1327. static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
  1328. static ssize_t
  1329. maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf)
  1330. {
  1331. struct netiucv_priv *priv = dev->driver_data;
  1332. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1333. return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
  1334. }
  1335. static ssize_t
  1336. maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1337. {
  1338. struct netiucv_priv *priv = dev->driver_data;
  1339. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1340. priv->conn->prof.maxmulti = 0;
  1341. return count;
  1342. }
  1343. static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
  1344. static ssize_t
  1345. maxcq_show (struct device *dev, struct device_attribute *attr, char *buf)
  1346. {
  1347. struct netiucv_priv *priv = dev->driver_data;
  1348. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1349. return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
  1350. }
  1351. static ssize_t
  1352. maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1353. {
  1354. struct netiucv_priv *priv = dev->driver_data;
  1355. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1356. priv->conn->prof.maxcqueue = 0;
  1357. return count;
  1358. }
  1359. static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
  1360. static ssize_t
  1361. sdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
  1362. {
  1363. struct netiucv_priv *priv = dev->driver_data;
  1364. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1365. return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
  1366. }
  1367. static ssize_t
  1368. sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1369. {
  1370. struct netiucv_priv *priv = dev->driver_data;
  1371. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1372. priv->conn->prof.doios_single = 0;
  1373. return count;
  1374. }
  1375. static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
  1376. static ssize_t
  1377. mdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
  1378. {
  1379. struct netiucv_priv *priv = dev->driver_data;
  1380. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1381. return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
  1382. }
  1383. static ssize_t
  1384. mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1385. {
  1386. struct netiucv_priv *priv = dev->driver_data;
  1387. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1388. priv->conn->prof.doios_multi = 0;
  1389. return count;
  1390. }
  1391. static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
  1392. static ssize_t
  1393. txlen_show (struct device *dev, struct device_attribute *attr, char *buf)
  1394. {
  1395. struct netiucv_priv *priv = dev->driver_data;
  1396. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1397. return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
  1398. }
  1399. static ssize_t
  1400. txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1401. {
  1402. struct netiucv_priv *priv = dev->driver_data;
  1403. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1404. priv->conn->prof.txlen = 0;
  1405. return count;
  1406. }
  1407. static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
  1408. static ssize_t
  1409. txtime_show (struct device *dev, struct device_attribute *attr, char *buf)
  1410. {
  1411. struct netiucv_priv *priv = dev->driver_data;
  1412. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1413. return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
  1414. }
  1415. static ssize_t
  1416. txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1417. {
  1418. struct netiucv_priv *priv = dev->driver_data;
  1419. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1420. priv->conn->prof.tx_time = 0;
  1421. return count;
  1422. }
  1423. static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
  1424. static ssize_t
  1425. txpend_show (struct device *dev, struct device_attribute *attr, char *buf)
  1426. {
  1427. struct netiucv_priv *priv = dev->driver_data;
  1428. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1429. return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
  1430. }
  1431. static ssize_t
  1432. txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1433. {
  1434. struct netiucv_priv *priv = dev->driver_data;
  1435. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1436. priv->conn->prof.tx_pending = 0;
  1437. return count;
  1438. }
  1439. static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
  1440. static ssize_t
  1441. txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf)
  1442. {
  1443. struct netiucv_priv *priv = dev->driver_data;
  1444. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1445. return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
  1446. }
  1447. static ssize_t
  1448. txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1449. {
  1450. struct netiucv_priv *priv = dev->driver_data;
  1451. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1452. priv->conn->prof.tx_max_pending = 0;
  1453. return count;
  1454. }
  1455. static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
/* Configuration attributes exposed directly on the device directory. */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};
/* Statistics attributes, grouped under the "stats" subdirectory. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};
  1481. static inline int
  1482. netiucv_add_files(struct device *dev)
  1483. {
  1484. int ret;
  1485. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1486. ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
  1487. if (ret)
  1488. return ret;
  1489. ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
  1490. if (ret)
  1491. sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
  1492. return ret;
  1493. }
  1494. static inline void
  1495. netiucv_remove_files(struct device *dev)
  1496. {
  1497. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1498. sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
  1499. sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
  1500. }
  1501. static int
  1502. netiucv_register_device(struct net_device *ndev)
  1503. {
  1504. struct netiucv_priv *priv = ndev->priv;
  1505. struct device *dev = kmalloc(sizeof(struct device), GFP_KERNEL);
  1506. int ret;
  1507. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1508. if (dev) {
  1509. memset(dev, 0, sizeof(struct device));
  1510. snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
  1511. dev->bus = &iucv_bus;
  1512. dev->parent = iucv_root;
  1513. /*
  1514. * The release function could be called after the
  1515. * module has been unloaded. It's _only_ task is to
  1516. * free the struct. Therefore, we specify kfree()
  1517. * directly here. (Probably a little bit obfuscating
  1518. * but legitime ...).
  1519. */
  1520. dev->release = (void (*)(struct device *))kfree;
  1521. dev->driver = &netiucv_driver;
  1522. } else
  1523. return -ENOMEM;
  1524. ret = device_register(dev);
  1525. if (ret)
  1526. return ret;
  1527. ret = netiucv_add_files(dev);
  1528. if (ret)
  1529. goto out_unreg;
  1530. priv->dev = dev;
  1531. dev->driver_data = priv;
  1532. return 0;
  1533. out_unreg:
  1534. device_unregister(dev);
  1535. return ret;
  1536. }
  1537. static void
  1538. netiucv_unregister_device(struct device *dev)
  1539. {
  1540. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1541. netiucv_remove_files(dev);
  1542. device_unregister(dev);
  1543. }
  1544. /**
  1545. * Allocate and initialize a new connection structure.
  1546. * Add it to the list of netiucv connections;
  1547. */
  1548. static struct iucv_connection *
  1549. netiucv_new_connection(struct net_device *dev, char *username)
  1550. {
  1551. struct iucv_connection **clist = &iucv_connections;
  1552. struct iucv_connection *conn =
  1553. (struct iucv_connection *)
  1554. kmalloc(sizeof(struct iucv_connection), GFP_KERNEL);
  1555. if (conn) {
  1556. memset(conn, 0, sizeof(struct iucv_connection));
  1557. skb_queue_head_init(&conn->collect_queue);
  1558. skb_queue_head_init(&conn->commit_queue);
  1559. conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
  1560. conn->netdev = dev;
  1561. conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
  1562. GFP_KERNEL | GFP_DMA);
  1563. if (!conn->rx_buff) {
  1564. kfree(conn);
  1565. return NULL;
  1566. }
  1567. conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
  1568. GFP_KERNEL | GFP_DMA);
  1569. if (!conn->tx_buff) {
  1570. kfree_skb(conn->rx_buff);
  1571. kfree(conn);
  1572. return NULL;
  1573. }
  1574. conn->fsm = init_fsm("netiucvconn", conn_state_names,
  1575. conn_event_names, NR_CONN_STATES,
  1576. NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
  1577. GFP_KERNEL);
  1578. if (!conn->fsm) {
  1579. kfree_skb(conn->tx_buff);
  1580. kfree_skb(conn->rx_buff);
  1581. kfree(conn);
  1582. return NULL;
  1583. }
  1584. fsm_settimer(conn->fsm, &conn->timer);
  1585. fsm_newstate(conn->fsm, CONN_STATE_INVALID);
  1586. if (username) {
  1587. memcpy(conn->userid, username, 9);
  1588. fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
  1589. }
  1590. conn->next = *clist;
  1591. *clist = conn;
  1592. }
  1593. return conn;
  1594. }
  1595. /**
  1596. * Release a connection structure and remove it from the
  1597. * list of netiucv connections.
  1598. */
  1599. static void
  1600. netiucv_remove_connection(struct iucv_connection *conn)
  1601. {
  1602. struct iucv_connection **clist = &iucv_connections;
  1603. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1604. if (conn == NULL)
  1605. return;
  1606. while (*clist) {
  1607. if (*clist == conn) {
  1608. *clist = conn->next;
  1609. if (conn->handle) {
  1610. iucv_unregister_program(conn->handle);
  1611. conn->handle = NULL;
  1612. }
  1613. fsm_deltimer(&conn->timer);
  1614. kfree_fsm(conn->fsm);
  1615. kfree_skb(conn->rx_buff);
  1616. kfree_skb(conn->tx_buff);
  1617. return;
  1618. }
  1619. clist = &((*clist)->next);
  1620. }
  1621. }
  1622. /**
  1623. * Release everything of a net device.
  1624. */
  1625. static void
  1626. netiucv_free_netdevice(struct net_device *dev)
  1627. {
  1628. struct netiucv_priv *privptr;
  1629. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1630. if (!dev)
  1631. return;
  1632. privptr = (struct netiucv_priv *)dev->priv;
  1633. if (privptr) {
  1634. if (privptr->conn)
  1635. netiucv_remove_connection(privptr->conn);
  1636. if (privptr->fsm)
  1637. kfree_fsm(privptr->fsm);
  1638. privptr->conn = NULL; privptr->fsm = NULL;
  1639. /* privptr gets freed by free_netdev() */
  1640. }
  1641. free_netdev(dev);
  1642. }
  1643. /**
  1644. * Initialize a net device. (Called from kernel in alloc_netdev())
  1645. */
  1646. static void
  1647. netiucv_setup_netdevice(struct net_device *dev)
  1648. {
  1649. memset(dev->priv, 0, sizeof(struct netiucv_priv));
  1650. dev->mtu = NETIUCV_MTU_DEFAULT;
  1651. dev->hard_start_xmit = netiucv_tx;
  1652. dev->open = netiucv_open;
  1653. dev->stop = netiucv_close;
  1654. dev->get_stats = netiucv_stats;
  1655. dev->change_mtu = netiucv_change_mtu;
  1656. dev->destructor = netiucv_free_netdevice;
  1657. dev->hard_header_len = NETIUCV_HDRLEN;
  1658. dev->addr_len = 0;
  1659. dev->type = ARPHRD_SLIP;
  1660. dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
  1661. dev->flags = IFF_POINTOPOINT | IFF_NOARP;
  1662. SET_MODULE_OWNER(dev);
  1663. }
  1664. /**
  1665. * Allocate and initialize everything of a net device.
  1666. */
  1667. static struct net_device *
  1668. netiucv_init_netdevice(char *username)
  1669. {
  1670. struct netiucv_priv *privptr;
  1671. struct net_device *dev;
  1672. dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
  1673. netiucv_setup_netdevice);
  1674. if (!dev)
  1675. return NULL;
  1676. if (dev_alloc_name(dev, dev->name) < 0) {
  1677. free_netdev(dev);
  1678. return NULL;
  1679. }
  1680. privptr = (struct netiucv_priv *)dev->priv;
  1681. privptr->fsm = init_fsm("netiucvdev", dev_state_names,
  1682. dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
  1683. dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
  1684. if (!privptr->fsm) {
  1685. free_netdev(dev);
  1686. return NULL;
  1687. }
  1688. privptr->conn = netiucv_new_connection(dev, username);
  1689. if (!privptr->conn) {
  1690. kfree_fsm(privptr->fsm);
  1691. free_netdev(dev);
  1692. IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
  1693. return NULL;
  1694. }
  1695. fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
  1696. return dev;
  1697. }
/**
 * conn_write - "connection" driver attribute store function.
 *
 * Parses a z/VM user id from the sysfs write buffer, creates a new
 * netiucv net device for that peer and registers it with the driver
 * core and the network stack.
 *
 * @drv:   the netiucv device driver (unused here).
 * @buf:   user-supplied id; up to 8 characters from [a-zA-Z0-9$],
 *         optionally followed by a newline.
 * @count: number of bytes written.
 *
 * Returns @count on success, -EINVAL on bad input, -ENODEV or the
 * underlying registration error on failure.
 */
static ssize_t
conn_write(struct device_driver *drv, const char *buf, size_t count)
{
	char *p;
	char username[10];
	int i, ret;
	struct net_device *dev;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* At most 8 id characters plus an optional trailing newline. */
	if (count>9) {
		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	/* Copy valid id characters; stop at a trailing newline. */
	for (i=0, p=(char *)buf; i<8 && *p; i++, p++) {
		if (isalnum(*p) || (*p == '$'))
			username[i]= *p;
		else if (*p == '\n') {
			/* trailing lf, grr */
			break;
		} else {
			PRINT_WARN("netiucv: Invalid character in username!\n");
			IUCV_DBF_TEXT_(setup, 2,
				"conn_write: invalid character %c\n", *p);
			return -EINVAL;
		}
	}
	/* Blank-pad to 9 characters — netiucv_new_connection() copies
	 * exactly 9 bytes into conn->userid. */
	while (i<9)
		username[i++] = ' ';
	username[9] = '\0';

	dev = netiucv_init_netdevice(username);
	if (!dev) {
		PRINT_WARN(
			"netiucv: Could not allocate network device structure "
			"for user '%s'\n", netiucv_printname(username));
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	/* Register the companion struct device with the driver core. */
	if ((ret = netiucv_register_device(dev))) {
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", ret);
		goto out_free_ndev;
	}

	/* sysfs magic */
	SET_NETDEV_DEV(dev,
		       (struct device*)((struct netiucv_priv*)dev->priv)->dev);

	if ((ret = register_netdev(dev))) {
		/* Undo netiucv_register_device(); the netdev itself is
		 * released below via out_free_ndev. */
		netiucv_unregister_device((struct device*)
			((struct netiucv_priv*)dev->priv)->dev);
		goto out_free_ndev;
	}

	PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));

	return count;

out_free_ndev:
	PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
	IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
	netiucv_free_netdevice(dev);
	return ret;
}

/* Write-only driver attribute: echo USERID > .../connection */
DRIVER_ATTR(connection, 0200, NULL, conn_write);
  1757. static ssize_t
  1758. remove_write (struct device_driver *drv, const char *buf, size_t count)
  1759. {
  1760. struct iucv_connection **clist = &iucv_connections;
  1761. struct net_device *ndev;
  1762. struct netiucv_priv *priv;
  1763. struct device *dev;
  1764. char name[IFNAMSIZ];
  1765. char *p;
  1766. int i;
  1767. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1768. if (count >= IFNAMSIZ)
  1769. count = IFNAMSIZ-1;
  1770. for (i=0, p=(char *)buf; i<count && *p; i++, p++) {
  1771. if ((*p == '\n') | (*p == ' ')) {
  1772. /* trailing lf, grr */
  1773. break;
  1774. } else {
  1775. name[i]=*p;
  1776. }
  1777. }
  1778. name[i] = '\0';
  1779. while (*clist) {
  1780. ndev = (*clist)->netdev;
  1781. priv = (struct netiucv_priv*)ndev->priv;
  1782. dev = priv->dev;
  1783. if (strncmp(name, ndev->name, count)) {
  1784. clist = &((*clist)->next);
  1785. continue;
  1786. }
  1787. if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
  1788. PRINT_WARN(
  1789. "netiucv: net device %s active with peer %s\n",
  1790. ndev->name, priv->conn->userid);
  1791. PRINT_WARN("netiucv: %s cannot be removed\n",
  1792. ndev->name);
  1793. IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
  1794. return -EBUSY;
  1795. }
  1796. unregister_netdev(ndev);
  1797. netiucv_unregister_device(dev);
  1798. return count;
  1799. }
  1800. PRINT_WARN("netiucv: net device %s unknown\n", name);
  1801. IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
  1802. return -EINVAL;
  1803. }
  1804. DRIVER_ATTR(remove, 0200, NULL, remove_write);
  1805. static void
  1806. netiucv_banner(void)
  1807. {
  1808. char vbuf[] = "$Revision: 1.69 $";
  1809. char *version = vbuf;
  1810. if ((version = strchr(version, ':'))) {
  1811. char *p = strchr(version + 1, '$');
  1812. if (p)
  1813. *p = '\0';
  1814. } else
  1815. version = " ??? ";
  1816. PRINT_INFO("NETIUCV driver Version%s initialized\n", version);
  1817. }
  1818. static void __exit
  1819. netiucv_exit(void)
  1820. {
  1821. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1822. while (iucv_connections) {
  1823. struct net_device *ndev = iucv_connections->netdev;
  1824. struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
  1825. struct device *dev = priv->dev;
  1826. unregister_netdev(ndev);
  1827. netiucv_unregister_device(dev);
  1828. }
  1829. driver_remove_file(&netiucv_driver, &driver_attr_connection);
  1830. driver_remove_file(&netiucv_driver, &driver_attr_remove);
  1831. driver_unregister(&netiucv_driver);
  1832. iucv_unregister_dbf_views();
  1833. PRINT_INFO("NETIUCV driver unloaded\n");
  1834. return;
  1835. }
  1836. static int __init
  1837. netiucv_init(void)
  1838. {
  1839. int ret;
  1840. ret = iucv_register_dbf_views();
  1841. if (ret) {
  1842. PRINT_WARN("netiucv_init failed, "
  1843. "iucv_register_dbf_views rc = %d\n", ret);
  1844. return ret;
  1845. }
  1846. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1847. ret = driver_register(&netiucv_driver);
  1848. if (ret) {
  1849. PRINT_ERR("NETIUCV: failed to register driver.\n");
  1850. IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
  1851. iucv_unregister_dbf_views();
  1852. return ret;
  1853. }
  1854. /* Add entry for specifying connections. */
  1855. ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
  1856. if (!ret) {
  1857. ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
  1858. netiucv_banner();
  1859. } else {
  1860. PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
  1861. IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
  1862. driver_unregister(&netiucv_driver);
  1863. iucv_unregister_dbf_views();
  1864. }
  1865. return ret;
  1866. }
/* Module entry/exit points and license declaration. */
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");