netiucv.c

  1. /*
  2. * IUCV network driver
  3. *
  4. * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
  5. * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
  6. *
  7. * Sysfs integration and all bugs therein by Cornelia Huck
  8. * (cornelia.huck@de.ibm.com)
  9. *
  10. * Documentation used:
  11. * the source of the original IUCV driver by:
  12. * Stefan Hegewald <hegewald@de.ibm.com>
  13. * Hartmut Penner <hpenner@de.ibm.com>
  14. * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  15. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  16. * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
  17. *
  18. * This program is free software; you can redistribute it and/or modify
  19. * it under the terms of the GNU General Public License as published by
  20. * the Free Software Foundation; either version 2, or (at your option)
  21. * any later version.
  22. *
  23. * This program is distributed in the hope that it will be useful,
  24. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  25. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  26. * GNU General Public License for more details.
  27. *
  28. * You should have received a copy of the GNU General Public License
  29. * along with this program; if not, write to the Free Software
  30. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  31. *
  32. */
  33. #undef DEBUG
  34. #include <linux/module.h>
  35. #include <linux/init.h>
  36. #include <linux/kernel.h>
  37. #include <linux/slab.h>
  38. #include <linux/errno.h>
  39. #include <linux/types.h>
  40. #include <linux/interrupt.h>
  41. #include <linux/timer.h>
  42. #include <linux/sched.h>
  43. #include <linux/bitops.h>
  44. #include <linux/signal.h>
  45. #include <linux/string.h>
  46. #include <linux/device.h>
  47. #include <linux/ip.h>
  48. #include <linux/if_arp.h>
  49. #include <linux/tcp.h>
  50. #include <linux/skbuff.h>
  51. #include <linux/ctype.h>
  52. #include <net/dst.h>
  53. #include <asm/io.h>
  54. #include <asm/uaccess.h>
  55. #include "iucv.h"
  56. #include "fsm.h"
  57. MODULE_AUTHOR
  58. ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
  59. MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
  60. #define PRINTK_HEADER " iucv: " /* for debugging */
  61. static struct device_driver netiucv_driver = {
  62. .name = "netiucv",
  63. .bus = &iucv_bus,
  64. };
  65. /**
  66. * Per connection profiling data
  67. */
  68. struct connection_profile {
  69. unsigned long maxmulti;
  70. unsigned long maxcqueue;
  71. unsigned long doios_single;
  72. unsigned long doios_multi;
  73. unsigned long txlen;
  74. unsigned long tx_time;
  75. struct timespec send_stamp;
  76. unsigned long tx_pending;
  77. unsigned long tx_max_pending;
  78. };
  79. /**
  80. * Representation of one iucv connection
  81. */
  82. struct iucv_connection {
  83. struct iucv_connection *next;
  84. iucv_handle_t handle;
  85. __u16 pathid;
  86. struct sk_buff *rx_buff;
  87. struct sk_buff *tx_buff;
  88. struct sk_buff_head collect_queue;
  89. struct sk_buff_head commit_queue;
  90. spinlock_t collect_lock;
  91. int collect_len;
  92. int max_buffsize;
  93. fsm_timer timer;
  94. fsm_instance *fsm;
  95. struct net_device *netdev;
  96. struct connection_profile prof;
  97. char userid[9];
  98. };
  99. /**
  100. * Linked list of all connection structs.
  101. */
  102. struct iucv_connection_struct {
  103. struct iucv_connection *iucv_connections;
  104. rwlock_t iucv_rwlock;
  105. };
  106. static struct iucv_connection_struct iucv_conns;
  107. /**
  108. * Representation of event-data for the
  109. * connection state machine.
  110. */
  111. struct iucv_event {
  112. struct iucv_connection *conn;
  113. void *data;
  114. };
  115. /**
  116. * Private part of the network device structure
  117. */
  118. struct netiucv_priv {
  119. struct net_device_stats stats;
  120. unsigned long tbusy;
  121. fsm_instance *fsm;
  122. struct iucv_connection *conn;
  123. struct device *dev;
  124. };
  125. /**
  126. * Link level header for a packet.
  127. */
  128. typedef struct ll_header_t {
  129. __u16 next;
  130. } ll_header;
  131. #define NETIUCV_HDRLEN (sizeof(ll_header))
  132. #define NETIUCV_BUFSIZE_MAX 32768
  133. #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
  134. #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
  135. #define NETIUCV_MTU_DEFAULT 9216
  136. #define NETIUCV_QUEUELEN_DEFAULT 50
  137. #define NETIUCV_TIMEOUT_5SEC 5000
  138. /**
  139. * Compatibility macros for busy handling
  140. * of network devices.
  141. */
  142. static __inline__ void netiucv_clear_busy(struct net_device *dev)
  143. {
  144. clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy));
  145. netif_wake_queue(dev);
  146. }
  147. static __inline__ int netiucv_test_and_set_busy(struct net_device *dev)
  148. {
  149. netif_stop_queue(dev);
  150. return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy);
  151. }
  152. static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
  153. static __u8 iucvMagic[16] = {
  154. 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
  155. 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
  156. };
  157. /**
  158. * This mask means the 16-byte IUCV "magic" and the origin userid must
  159. * match exactly as specified in order to give connection_pending()
  160. * control.
  161. */
  162. static __u8 netiucv_mask[] = {
  163. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  164. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  165. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
  166. };
  167. /**
  168. * Convert an iucv userId to its printable
  169. * form (strip whitespace at end).
  170. *
  171. * @param An iucv userId
  172. *
  173. * @returns The printable string (static data!!)
  174. */
  175. static __inline__ char *
  176. netiucv_printname(char *name)
  177. {
  178. static char tmp[9];
  179. char *p = tmp;
  180. memcpy(tmp, name, 8);
  181. tmp[8] = '\0';
  182. while (*p && (!isspace(*p)))
  183. p++;
  184. *p = '\0';
  185. return tmp;
  186. }
  187. /**
  188. * States of the interface statemachine.
  189. */
  190. enum dev_states {
  191. DEV_STATE_STOPPED,
  192. DEV_STATE_STARTWAIT,
  193. DEV_STATE_STOPWAIT,
  194. DEV_STATE_RUNNING,
  195. /**
  196. * MUST be always the last element!!
  197. */
  198. NR_DEV_STATES
  199. };
  200. static const char *dev_state_names[] = {
  201. "Stopped",
  202. "StartWait",
  203. "StopWait",
  204. "Running",
  205. };
  206. /**
  207. * Events of the interface statemachine.
  208. */
  209. enum dev_events {
  210. DEV_EVENT_START,
  211. DEV_EVENT_STOP,
  212. DEV_EVENT_CONUP,
  213. DEV_EVENT_CONDOWN,
  214. /**
  215. * MUST be always the last element!!
  216. */
  217. NR_DEV_EVENTS
  218. };
  219. static const char *dev_event_names[] = {
  220. "Start",
  221. "Stop",
  222. "Connection up",
  223. "Connection down",
  224. };
  225. /**
  226. * Events of the connection statemachine
  227. */
  228. enum conn_events {
  229. /**
  230. * Events, representing callbacks from
  231. * lowlevel iucv layer
  232. */
  233. CONN_EVENT_CONN_REQ,
  234. CONN_EVENT_CONN_ACK,
  235. CONN_EVENT_CONN_REJ,
  236. CONN_EVENT_CONN_SUS,
  237. CONN_EVENT_CONN_RES,
  238. CONN_EVENT_RX,
  239. CONN_EVENT_TXDONE,
  240. /**
  241. * Events, representing error return codes from
  242. * calls to lowlevel iucv layer
  243. */
  244. /**
  245. * Event, representing timer expiry.
  246. */
  247. CONN_EVENT_TIMER,
  248. /**
  249. * Events, representing commands from upper levels.
  250. */
  251. CONN_EVENT_START,
  252. CONN_EVENT_STOP,
  253. /**
  254. * MUST be always the last element!!
  255. */
  256. NR_CONN_EVENTS,
  257. };
  258. static const char *conn_event_names[] = {
  259. "Remote connection request",
  260. "Remote connection acknowledge",
  261. "Remote connection reject",
  262. "Connection suspended",
  263. "Connection resumed",
  264. "Data received",
  265. "Data sent",
  266. "Timer",
  267. "Start",
  268. "Stop",
  269. };
  270. /**
  271. * States of the connection statemachine.
  272. */
  273. enum conn_states {
  274. /**
  275. * Connection not assigned to any device,
  276. * initial state, invalid
  277. */
  278. CONN_STATE_INVALID,
  279. /**
  280. * Userid assigned but not operating
  281. */
  282. CONN_STATE_STOPPED,
  283. /**
  284. * Connection registered,
  285. * no connection request sent yet,
  286. * no connection request received
  287. */
  288. CONN_STATE_STARTWAIT,
  289. /**
  290. * Connection registered and connection request sent,
  291. * no acknowledge and no connection request received yet.
  292. */
  293. CONN_STATE_SETUPWAIT,
  294. /**
  295. * Connection up and running idle
  296. */
  297. CONN_STATE_IDLE,
  298. /**
  299. * Data sent, awaiting CONN_EVENT_TXDONE
  300. */
  301. CONN_STATE_TX,
  302. /**
  303. * Error during registration.
  304. */
  305. CONN_STATE_REGERR,
  306. /**
  307. * Error during connection setup.
  308. */
  309. CONN_STATE_CONNERR,
  310. /**
  311. * MUST be always the last element!!
  312. */
  313. NR_CONN_STATES,
  314. };
  315. static const char *conn_state_names[] = {
  316. "Invalid",
  317. "Stopped",
  318. "StartWait",
  319. "SetupWait",
  320. "Idle",
  321. "TX",
  322. "Terminating",
  323. "Registration error",
  324. "Connect error",
  325. };
  326. /**
  327. * Debug Facility Stuff
  328. */
  329. static debug_info_t *iucv_dbf_setup = NULL;
  330. static debug_info_t *iucv_dbf_data = NULL;
  331. static debug_info_t *iucv_dbf_trace = NULL;
  332. DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
  333. static void
  334. iucv_unregister_dbf_views(void)
  335. {
  336. if (iucv_dbf_setup)
  337. debug_unregister(iucv_dbf_setup);
  338. if (iucv_dbf_data)
  339. debug_unregister(iucv_dbf_data);
  340. if (iucv_dbf_trace)
  341. debug_unregister(iucv_dbf_trace);
  342. }
  343. static int
  344. iucv_register_dbf_views(void)
  345. {
  346. iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
  347. IUCV_DBF_SETUP_PAGES,
  348. IUCV_DBF_SETUP_NR_AREAS,
  349. IUCV_DBF_SETUP_LEN);
  350. iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
  351. IUCV_DBF_DATA_PAGES,
  352. IUCV_DBF_DATA_NR_AREAS,
  353. IUCV_DBF_DATA_LEN);
  354. iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
  355. IUCV_DBF_TRACE_PAGES,
  356. IUCV_DBF_TRACE_NR_AREAS,
  357. IUCV_DBF_TRACE_LEN);
  358. if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
  359. (iucv_dbf_trace == NULL)) {
  360. iucv_unregister_dbf_views();
  361. return -ENOMEM;
  362. }
  363. debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
  364. debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
  365. debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
  366. debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
  367. debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
  368. debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
  369. return 0;
  370. }
  371. /**
  372. * Callback-wrappers, called from lowlevel iucv layer.
  373. *****************************************************************************/
  374. static void
  375. netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data)
  376. {
  377. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  378. struct iucv_event ev;
  379. ev.conn = conn;
  380. ev.data = (void *)eib;
  381. fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
  382. }
  383. static void
  384. netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data)
  385. {
  386. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  387. struct iucv_event ev;
  388. ev.conn = conn;
  389. ev.data = (void *)eib;
  390. fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
  391. }
  392. static void
  393. netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
  394. {
  395. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  396. struct iucv_event ev;
  397. ev.conn = conn;
  398. ev.data = (void *)eib;
  399. fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
  400. }
  401. static void
  402. netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data)
  403. {
  404. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  405. struct iucv_event ev;
  406. ev.conn = conn;
  407. ev.data = (void *)eib;
  408. fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
  409. }
  410. static void
  411. netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
  412. {
  413. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  414. struct iucv_event ev;
  415. ev.conn = conn;
  416. ev.data = (void *)eib;
  417. fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
  418. }
  419. static void
  420. netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
  421. {
  422. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  423. struct iucv_event ev;
  424. ev.conn = conn;
  425. ev.data = (void *)eib;
  426. fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
  427. }
  428. static void
  429. netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
  430. {
  431. struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
  432. struct iucv_event ev;
  433. ev.conn = conn;
  434. ev.data = (void *)eib;
  435. fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
  436. }
  437. static iucv_interrupt_ops_t netiucv_ops = {
  438. .ConnectionPending = netiucv_callback_connreq,
  439. .ConnectionComplete = netiucv_callback_connack,
  440. .ConnectionSevered = netiucv_callback_connrej,
  441. .ConnectionQuiesced = netiucv_callback_connsusp,
  442. .ConnectionResumed = netiucv_callback_connres,
  443. .MessagePending = netiucv_callback_rx,
  444. .MessageComplete = netiucv_callback_txdone
  445. };
  446. /**
  447. * Dummy NOP action for all statemachines
  448. */
  449. static void
  450. fsm_action_nop(fsm_instance *fi, int event, void *arg)
  451. {
  452. }
  453. /**
  454. * Actions of the connection statemachine
  455. *****************************************************************************/
  456. /**
  457. * Helper function for conn_action_rx()
  458. * Unpack a just received skb and hand it over to
  459. * upper layers.
  460. *
  461. * @param conn The connection where this skb has been received.
  462. * @param pskb The received skb.
  463. */
  464. //static __inline__ void
  465. static void
  466. netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
  467. {
  468. struct net_device *dev = conn->netdev;
  469. struct netiucv_priv *privptr = dev->priv;
  470. __u16 offset = 0;
  471. skb_put(pskb, NETIUCV_HDRLEN);
  472. pskb->dev = dev;
  473. pskb->ip_summed = CHECKSUM_NONE;
  474. pskb->protocol = ntohs(ETH_P_IP);
  475. while (1) {
  476. struct sk_buff *skb;
  477. ll_header *header = (ll_header *)pskb->data;
  478. if (!header->next)
  479. break;
  480. skb_pull(pskb, NETIUCV_HDRLEN);
  481. header->next -= offset;
  482. offset += header->next;
  483. header->next -= NETIUCV_HDRLEN;
  484. if (skb_tailroom(pskb) < header->next) {
  485. PRINT_WARN("%s: Illegal next field in iucv header: "
  486. "%d > %d\n",
  487. dev->name, header->next, skb_tailroom(pskb));
  488. IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
  489. header->next, skb_tailroom(pskb));
  490. return;
  491. }
  492. skb_put(pskb, header->next);
  493. pskb->mac.raw = pskb->data;
  494. skb = dev_alloc_skb(pskb->len);
  495. if (!skb) {
  496. PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
  497. dev->name);
  498. IUCV_DBF_TEXT(data, 2,
  499. "Out of memory in netiucv_unpack_skb\n");
  500. privptr->stats.rx_dropped++;
  501. return;
  502. }
  503. memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
  504. skb->mac.raw = skb->data;
  505. skb->dev = pskb->dev;
  506. skb->protocol = pskb->protocol;
  507. pskb->ip_summed = CHECKSUM_UNNECESSARY;
  508. /*
  509. * Since receiving is always initiated from a tasklet (in iucv.c),
  510. * we must use netif_rx_ni() instead of netif_rx()
  511. */
  512. netif_rx_ni(skb);
  513. dev->last_rx = jiffies;
  514. privptr->stats.rx_packets++;
  515. privptr->stats.rx_bytes += skb->len;
  516. skb_pull(pskb, header->next);
  517. skb_put(pskb, NETIUCV_HDRLEN);
  518. }
  519. }
  520. static void
  521. conn_action_rx(fsm_instance *fi, int event, void *arg)
  522. {
  523. struct iucv_event *ev = (struct iucv_event *)arg;
  524. struct iucv_connection *conn = ev->conn;
  525. iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
  526. struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
  527. __u32 msglen = eib->ln1msg2.ipbfln1f;
  528. int rc;
  529. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  530. if (!conn->netdev) {
  531. /* FRITZ: How to tell iucv LL to drop the msg? */
  532. PRINT_WARN("Received data for unlinked connection\n");
  533. IUCV_DBF_TEXT(data, 2,
  534. "Received data for unlinked connection\n");
  535. return;
  536. }
  537. if (msglen > conn->max_buffsize) {
  538. /* FRITZ: How to tell iucv LL to drop the msg? */
  539. privptr->stats.rx_dropped++;
  540. PRINT_WARN("msglen %d > max_buffsize %d\n",
  541. msglen, conn->max_buffsize);
  542. IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
  543. msglen, conn->max_buffsize);
  544. return;
  545. }
  546. conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
  547. conn->rx_buff->len = 0;
  548. rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
  549. conn->rx_buff->data, msglen, NULL, NULL, NULL);
  550. if (rc || msglen < 5) {
  551. privptr->stats.rx_errors++;
  552. PRINT_WARN("iucv_receive returned %08x\n", rc);
  553. IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
  554. return;
  555. }
  556. netiucv_unpack_skb(conn, conn->rx_buff);
  557. }
  558. static void
  559. conn_action_txdone(fsm_instance *fi, int event, void *arg)
  560. {
  561. struct iucv_event *ev = (struct iucv_event *)arg;
  562. struct iucv_connection *conn = ev->conn;
  563. iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data;
  564. struct netiucv_priv *privptr = NULL;
  565. /* Shut up, gcc! skb is always below 2G. */
  566. __u32 single_flag = eib->ipmsgtag;
  567. __u32 txbytes = 0;
  568. __u32 txpackets = 0;
  569. __u32 stat_maxcq = 0;
  570. struct sk_buff *skb;
  571. unsigned long saveflags;
  572. ll_header header;
  573. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  574. if (conn && conn->netdev && conn->netdev->priv)
  575. privptr = (struct netiucv_priv *)conn->netdev->priv;
  576. conn->prof.tx_pending--;
  577. if (single_flag) {
  578. if ((skb = skb_dequeue(&conn->commit_queue))) {
  579. atomic_dec(&skb->users);
  580. dev_kfree_skb_any(skb);
  581. if (privptr) {
  582. privptr->stats.tx_packets++;
  583. privptr->stats.tx_bytes +=
  584. (skb->len - NETIUCV_HDRLEN
  585. - NETIUCV_HDRLEN);
  586. }
  587. }
  588. }
  589. conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
  590. conn->tx_buff->len = 0;
  591. spin_lock_irqsave(&conn->collect_lock, saveflags);
  592. while ((skb = skb_dequeue(&conn->collect_queue))) {
  593. header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
  594. memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
  595. NETIUCV_HDRLEN);
  596. memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
  597. txbytes += skb->len;
  598. txpackets++;
  599. stat_maxcq++;
  600. atomic_dec(&skb->users);
  601. dev_kfree_skb_any(skb);
  602. }
  603. if (conn->collect_len > conn->prof.maxmulti)
  604. conn->prof.maxmulti = conn->collect_len;
  605. conn->collect_len = 0;
  606. spin_unlock_irqrestore(&conn->collect_lock, saveflags);
  607. if (conn->tx_buff->len) {
  608. int rc;
  609. header.next = 0;
  610. memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
  611. NETIUCV_HDRLEN);
  612. conn->prof.send_stamp = xtime;
  613. rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
  614. conn->tx_buff->data, conn->tx_buff->len);
  615. conn->prof.doios_multi++;
  616. conn->prof.txlen += conn->tx_buff->len;
  617. conn->prof.tx_pending++;
  618. if (conn->prof.tx_pending > conn->prof.tx_max_pending)
  619. conn->prof.tx_max_pending = conn->prof.tx_pending;
  620. if (rc) {
  621. conn->prof.tx_pending--;
  622. fsm_newstate(fi, CONN_STATE_IDLE);
  623. if (privptr)
  624. privptr->stats.tx_errors += txpackets;
  625. PRINT_WARN("iucv_send returned %08x\n", rc);
  626. IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
  627. } else {
  628. if (privptr) {
  629. privptr->stats.tx_packets += txpackets;
  630. privptr->stats.tx_bytes += txbytes;
  631. }
  632. if (stat_maxcq > conn->prof.maxcqueue)
  633. conn->prof.maxcqueue = stat_maxcq;
  634. }
  635. } else
  636. fsm_newstate(fi, CONN_STATE_IDLE);
  637. }
  638. static void
  639. conn_action_connaccept(fsm_instance *fi, int event, void *arg)
  640. {
  641. struct iucv_event *ev = (struct iucv_event *)arg;
  642. struct iucv_connection *conn = ev->conn;
  643. iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
  644. struct net_device *netdev = conn->netdev;
  645. struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
  646. int rc;
  647. __u16 msglimit;
  648. __u8 udata[16];
  649. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  650. rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
  651. conn->handle, conn, NULL, &msglimit);
  652. if (rc) {
  653. PRINT_WARN("%s: IUCV accept failed with error %d\n",
  654. netdev->name, rc);
  655. IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
  656. return;
  657. }
  658. fsm_newstate(fi, CONN_STATE_IDLE);
  659. conn->pathid = eib->ippathid;
  660. netdev->tx_queue_len = msglimit;
  661. fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
  662. }
  663. static void
  664. conn_action_connreject(fsm_instance *fi, int event, void *arg)
  665. {
  666. struct iucv_event *ev = (struct iucv_event *)arg;
  667. struct iucv_connection *conn = ev->conn;
  668. struct net_device *netdev = conn->netdev;
  669. iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
  670. __u8 udata[16];
  671. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  672. iucv_sever(eib->ippathid, udata);
  673. if (eib->ippathid != conn->pathid) {
  674. PRINT_INFO("%s: IR Connection Pending; "
  675. "pathid %d does not match original pathid %d\n",
  676. netdev->name, eib->ippathid, conn->pathid);
  677. IUCV_DBF_TEXT_(data, 2,
  678. "connreject: IR pathid %d, conn. pathid %d\n",
  679. eib->ippathid, conn->pathid);
  680. iucv_sever(conn->pathid, udata);
  681. }
  682. }
  683. static void
  684. conn_action_connack(fsm_instance *fi, int event, void *arg)
  685. {
  686. struct iucv_event *ev = (struct iucv_event *)arg;
  687. struct iucv_connection *conn = ev->conn;
  688. iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
  689. struct net_device *netdev = conn->netdev;
  690. struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
  691. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  692. fsm_deltimer(&conn->timer);
  693. fsm_newstate(fi, CONN_STATE_IDLE);
  694. if (eib->ippathid != conn->pathid) {
  695. PRINT_INFO("%s: IR Connection Complete; "
  696. "pathid %d does not match original pathid %d\n",
  697. netdev->name, eib->ippathid, conn->pathid);
  698. IUCV_DBF_TEXT_(data, 2,
  699. "connack: IR pathid %d, conn. pathid %d\n",
  700. eib->ippathid, conn->pathid);
  701. conn->pathid = eib->ippathid;
  702. }
  703. netdev->tx_queue_len = eib->ipmsglim;
  704. fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
  705. }
  706. static void
  707. conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
  708. {
  709. struct iucv_connection *conn = (struct iucv_connection *)arg;
  710. __u8 udata[16];
  711. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  712. fsm_deltimer(&conn->timer);
  713. iucv_sever(conn->pathid, udata);
  714. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  715. }
  716. static void
  717. conn_action_connsever(fsm_instance *fi, int event, void *arg)
  718. {
  719. struct iucv_event *ev = (struct iucv_event *)arg;
  720. struct iucv_connection *conn = ev->conn;
  721. struct net_device *netdev = conn->netdev;
  722. struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
  723. __u8 udata[16];
  724. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  725. fsm_deltimer(&conn->timer);
  726. iucv_sever(conn->pathid, udata);
  727. PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
  728. IUCV_DBF_TEXT(data, 2,
  729. "conn_action_connsever: Remote dropped connection\n");
  730. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  731. fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
  732. }
  733. static void
  734. conn_action_start(fsm_instance *fi, int event, void *arg)
  735. {
  736. struct iucv_event *ev = (struct iucv_event *)arg;
  737. struct iucv_connection *conn = ev->conn;
  738. __u16 msglimit;
  739. int rc;
  740. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  741. if (!conn->handle) {
  742. IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
  743. conn->handle =
  744. iucv_register_program(iucvMagic, conn->userid,
  745. netiucv_mask,
  746. &netiucv_ops, conn);
  747. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  748. if (!conn->handle) {
  749. fsm_newstate(fi, CONN_STATE_REGERR);
  750. conn->handle = NULL;
  751. IUCV_DBF_TEXT(setup, 2,
  752. "NULL from iucv_register_program\n");
  753. return;
  754. }
  755. PRINT_DEBUG("%s('%s'): registered successfully\n",
  756. conn->netdev->name, conn->userid);
  757. }
  758. PRINT_DEBUG("%s('%s'): connecting ...\n",
  759. conn->netdev->name, conn->userid);
  760. /* We must set the state before calling iucv_connect because the callback
  761. * handler could be called at any point after the connection request is
  762. * sent */
  763. fsm_newstate(fi, CONN_STATE_SETUPWAIT);
  764. rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
  765. conn->userid, iucv_host, 0, NULL, &msglimit,
  766. conn->handle, conn);
  767. switch (rc) {
  768. case 0:
  769. conn->netdev->tx_queue_len = msglimit;
  770. fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
  771. CONN_EVENT_TIMER, conn);
  772. return;
  773. case 11:
  774. PRINT_INFO("%s: User %s is currently not available.\n",
  775. conn->netdev->name,
  776. netiucv_printname(conn->userid));
  777. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  778. return;
  779. case 12:
  780. PRINT_INFO("%s: User %s is currently not ready.\n",
  781. conn->netdev->name,
  782. netiucv_printname(conn->userid));
  783. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  784. return;
  785. case 13:
  786. PRINT_WARN("%s: Too many IUCV connections.\n",
  787. conn->netdev->name);
  788. fsm_newstate(fi, CONN_STATE_CONNERR);
  789. break;
  790. case 14:
  791. PRINT_WARN(
  792. "%s: User %s has too many IUCV connections.\n",
  793. conn->netdev->name,
  794. netiucv_printname(conn->userid));
  795. fsm_newstate(fi, CONN_STATE_CONNERR);
  796. break;
  797. case 15:
  798. PRINT_WARN(
  799. "%s: No IUCV authorization in CP directory.\n",
  800. conn->netdev->name);
  801. fsm_newstate(fi, CONN_STATE_CONNERR);
  802. break;
  803. default:
  804. PRINT_WARN("%s: iucv_connect returned error %d\n",
  805. conn->netdev->name, rc);
  806. fsm_newstate(fi, CONN_STATE_CONNERR);
  807. break;
  808. }
  809. IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
  810. IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
  811. iucv_unregister_program(conn->handle);
  812. conn->handle = NULL;
  813. }
  814. static void
  815. netiucv_purge_skb_queue(struct sk_buff_head *q)
  816. {
  817. struct sk_buff *skb;
  818. while ((skb = skb_dequeue(q))) {
  819. atomic_dec(&skb->users);
  820. dev_kfree_skb_any(skb);
  821. }
  822. }
  823. static void
  824. conn_action_stop(fsm_instance *fi, int event, void *arg)
  825. {
  826. struct iucv_event *ev = (struct iucv_event *)arg;
  827. struct iucv_connection *conn = ev->conn;
  828. struct net_device *netdev = conn->netdev;
  829. struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
  830. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  831. fsm_deltimer(&conn->timer);
  832. fsm_newstate(fi, CONN_STATE_STOPPED);
  833. netiucv_purge_skb_queue(&conn->collect_queue);
  834. if (conn->handle)
  835. IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
  836. iucv_unregister_program(conn->handle);
  837. conn->handle = NULL;
  838. netiucv_purge_skb_queue(&conn->commit_queue);
  839. fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
  840. }
  841. static void
  842. conn_action_inval(fsm_instance *fi, int event, void *arg)
  843. {
  844. struct iucv_event *ev = (struct iucv_event *)arg;
  845. struct iucv_connection *conn = ev->conn;
  846. struct net_device *netdev = conn->netdev;
  847. PRINT_WARN("%s: Cannot connect without username\n",
  848. netdev->name);
  849. IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
  850. }
  851. static const fsm_node conn_fsm[] = {
  852. { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
  853. { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
  854. { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
  855. { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
  856. { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
  857. { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
  858. { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
  859. { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
  860. { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
  861. { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
  862. { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
  863. { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
  864. { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
  865. { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
  866. { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
  867. { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
  868. { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
  869. { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
  870. { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
  871. { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
  872. { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
  873. { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
  874. { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
  875. };
  876. static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
  877. /**
  878. * Actions for interface - statemachine.
  879. *****************************************************************************/
  880. /**
  881. * Startup connection by sending CONN_EVENT_START to it.
  882. *
  883. * @param fi An instance of an interface statemachine.
  884. * @param event The event, just happened.
  885. * @param arg Generic pointer, casted from struct net_device * upon call.
  886. */
  887. static void
  888. dev_action_start(fsm_instance *fi, int event, void *arg)
  889. {
  890. struct net_device *dev = (struct net_device *)arg;
  891. struct netiucv_priv *privptr = dev->priv;
  892. struct iucv_event ev;
  893. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  894. ev.conn = privptr->conn;
  895. fsm_newstate(fi, DEV_STATE_STARTWAIT);
  896. fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev);
  897. }
  898. /**
  899. * Shutdown connection by sending CONN_EVENT_STOP to it.
  900. *
  901. * @param fi An instance of an interface statemachine.
  902. * @param event The event, just happened.
  903. * @param arg Generic pointer, casted from struct net_device * upon call.
  904. */
  905. static void
  906. dev_action_stop(fsm_instance *fi, int event, void *arg)
  907. {
  908. struct net_device *dev = (struct net_device *)arg;
  909. struct netiucv_priv *privptr = dev->priv;
  910. struct iucv_event ev;
  911. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  912. ev.conn = privptr->conn;
  913. fsm_newstate(fi, DEV_STATE_STOPWAIT);
  914. fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
  915. }
  916. /**
  917. * Called from connection statemachine
  918. * when a connection is up and running.
  919. *
  920. * @param fi An instance of an interface statemachine.
  921. * @param event The event, just happened.
  922. * @param arg Generic pointer, casted from struct net_device * upon call.
  923. */
  924. static void
  925. dev_action_connup(fsm_instance *fi, int event, void *arg)
  926. {
  927. struct net_device *dev = (struct net_device *)arg;
  928. struct netiucv_priv *privptr = dev->priv;
  929. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  930. switch (fsm_getstate(fi)) {
  931. case DEV_STATE_STARTWAIT:
  932. fsm_newstate(fi, DEV_STATE_RUNNING);
  933. PRINT_INFO("%s: connected with remote side %s\n",
  934. dev->name, privptr->conn->userid);
  935. IUCV_DBF_TEXT(setup, 3,
  936. "connection is up and running\n");
  937. break;
  938. case DEV_STATE_STOPWAIT:
  939. PRINT_INFO(
  940. "%s: got connection UP event during shutdown!\n",
  941. dev->name);
  942. IUCV_DBF_TEXT(data, 2,
  943. "dev_action_connup: in DEV_STATE_STOPWAIT\n");
  944. break;
  945. }
  946. }
  947. /**
  948. * Called from connection statemachine
  949. * when a connection has been shutdown.
  950. *
  951. * @param fi An instance of an interface statemachine.
  952. * @param event The event, just happened.
  953. * @param arg Generic pointer, casted from struct net_device * upon call.
  954. */
  955. static void
  956. dev_action_conndown(fsm_instance *fi, int event, void *arg)
  957. {
  958. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  959. switch (fsm_getstate(fi)) {
  960. case DEV_STATE_RUNNING:
  961. fsm_newstate(fi, DEV_STATE_STARTWAIT);
  962. break;
  963. case DEV_STATE_STOPWAIT:
  964. fsm_newstate(fi, DEV_STATE_STOPPED);
  965. IUCV_DBF_TEXT(setup, 3, "connection is down\n");
  966. break;
  967. }
  968. }
  969. static const fsm_node dev_fsm[] = {
  970. { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
  971. { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
  972. { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
  973. { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
  974. { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
  975. { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
  976. { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
  977. { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
  978. };
  979. static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
  980. /**
  981. * Transmit a packet.
  982. * This is a helper function for netiucv_tx().
  983. *
  984. * @param conn Connection to be used for sending.
  985. * @param skb Pointer to struct sk_buff of packet to send.
  986. * The linklevel header has already been set up
  987. * by netiucv_tx().
  988. *
  989. * @return 0 on success, -ERRNO on failure. (Never fails.)
  990. */
  991. static int
  992. netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
  993. unsigned long saveflags;
  994. ll_header header;
  995. int rc = 0;
  996. if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
  997. int l = skb->len + NETIUCV_HDRLEN;
  998. spin_lock_irqsave(&conn->collect_lock, saveflags);
  999. if (conn->collect_len + l >
  1000. (conn->max_buffsize - NETIUCV_HDRLEN)) {
  1001. rc = -EBUSY;
  1002. IUCV_DBF_TEXT(data, 2,
  1003. "EBUSY from netiucv_transmit_skb\n");
  1004. } else {
  1005. atomic_inc(&skb->users);
  1006. skb_queue_tail(&conn->collect_queue, skb);
  1007. conn->collect_len += l;
  1008. }
  1009. spin_unlock_irqrestore(&conn->collect_lock, saveflags);
  1010. } else {
  1011. struct sk_buff *nskb = skb;
  1012. /**
  1013. * Copy the skb to a newly allocated skb in lowmem only if the
  1014. * data is located above 2G in memory or tailroom is < 2.
  1015. */
  1016. unsigned long hi =
  1017. ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31;
  1018. int copied = 0;
  1019. if (hi || (skb_tailroom(skb) < 2)) {
  1020. nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
  1021. NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
  1022. if (!nskb) {
  1023. PRINT_WARN("%s: Could not allocate tx_skb\n",
  1024. conn->netdev->name);
  1025. IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
  1026. rc = -ENOMEM;
  1027. return rc;
  1028. } else {
  1029. skb_reserve(nskb, NETIUCV_HDRLEN);
  1030. memcpy(skb_put(nskb, skb->len),
  1031. skb->data, skb->len);
  1032. }
  1033. copied = 1;
  1034. }
  1035. /**
  1036. * skb now is below 2G and has enough room. Add headers.
  1037. */
  1038. header.next = nskb->len + NETIUCV_HDRLEN;
  1039. memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
  1040. header.next = 0;
  1041. memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
  1042. fsm_newstate(conn->fsm, CONN_STATE_TX);
  1043. conn->prof.send_stamp = xtime;
  1044. rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */,
  1045. 0, nskb->data, nskb->len);
  1046. /* Shut up, gcc! nskb is always below 2G. */
  1047. conn->prof.doios_single++;
  1048. conn->prof.txlen += skb->len;
  1049. conn->prof.tx_pending++;
  1050. if (conn->prof.tx_pending > conn->prof.tx_max_pending)
  1051. conn->prof.tx_max_pending = conn->prof.tx_pending;
  1052. if (rc) {
  1053. struct netiucv_priv *privptr;
  1054. fsm_newstate(conn->fsm, CONN_STATE_IDLE);
  1055. conn->prof.tx_pending--;
  1056. privptr = (struct netiucv_priv *)conn->netdev->priv;
  1057. if (privptr)
  1058. privptr->stats.tx_errors++;
  1059. if (copied)
  1060. dev_kfree_skb(nskb);
  1061. else {
  1062. /**
  1063. * Remove our headers. They get added
  1064. * again on retransmit.
  1065. */
  1066. skb_pull(skb, NETIUCV_HDRLEN);
  1067. skb_trim(skb, skb->len - NETIUCV_HDRLEN);
  1068. }
  1069. PRINT_WARN("iucv_send returned %08x\n", rc);
  1070. IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
  1071. } else {
  1072. if (copied)
  1073. dev_kfree_skb(skb);
  1074. atomic_inc(&nskb->users);
  1075. skb_queue_tail(&conn->commit_queue, nskb);
  1076. }
  1077. }
  1078. return rc;
  1079. }
  1080. /**
  1081. * Interface API for upper network layers
  1082. *****************************************************************************/
  1083. /**
  1084. * Open an interface.
  1085. * Called from generic network layer when ifconfig up is run.
  1086. *
  1087. * @param dev Pointer to interface struct.
  1088. *
  1089. * @return 0 on success, -ERRNO on failure. (Never fails.)
  1090. */
  1091. static int
  1092. netiucv_open(struct net_device *dev) {
  1093. fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
  1094. return 0;
  1095. }
  1096. /**
  1097. * Close an interface.
  1098. * Called from generic network layer when ifconfig down is run.
  1099. *
  1100. * @param dev Pointer to interface struct.
  1101. *
  1102. * @return 0 on success, -ERRNO on failure. (Never fails.)
  1103. */
  1104. static int
  1105. netiucv_close(struct net_device *dev) {
  1106. fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);
  1107. return 0;
  1108. }
  1109. /**
  1110. * Start transmission of a packet.
  1111. * Called from generic network device layer.
  1112. *
  1113. * @param skb Pointer to buffer containing the packet.
  1114. * @param dev Pointer to interface struct.
  1115. *
  1116. * @return 0 if packet consumed, !0 if packet rejected.
  1117. * Note: If we return !0, then the packet is free'd by
  1118. * the generic network layer.
  1119. */
  1120. static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
  1121. {
  1122. int rc = 0;
  1123. struct netiucv_priv *privptr = dev->priv;
  1124. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1125. /**
  1126. * Some sanity checks ...
  1127. */
  1128. if (skb == NULL) {
  1129. PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
  1130. IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
  1131. privptr->stats.tx_dropped++;
  1132. return 0;
  1133. }
  1134. if (skb_headroom(skb) < NETIUCV_HDRLEN) {
  1135. PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
  1136. dev->name, NETIUCV_HDRLEN);
  1137. IUCV_DBF_TEXT(data, 2,
  1138. "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
  1139. dev_kfree_skb(skb);
  1140. privptr->stats.tx_dropped++;
  1141. return 0;
  1142. }
  1143. /**
  1144. * If connection is not running, try to restart it
  1145. * and throw away packet.
  1146. */
  1147. if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
  1148. fsm_event(privptr->fsm, DEV_EVENT_START, dev);
  1149. dev_kfree_skb(skb);
  1150. privptr->stats.tx_dropped++;
  1151. privptr->stats.tx_errors++;
  1152. privptr->stats.tx_carrier_errors++;
  1153. return 0;
  1154. }
  1155. if (netiucv_test_and_set_busy(dev)) {
  1156. IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
  1157. return -EBUSY;
  1158. }
  1159. dev->trans_start = jiffies;
  1160. if (netiucv_transmit_skb(privptr->conn, skb))
  1161. rc = 1;
  1162. netiucv_clear_busy(dev);
  1163. return rc;
  1164. }
  1165. /**
  1166. * Returns interface statistics of a device.
  1167. *
  1168. * @param dev Pointer to interface struct.
  1169. *
  1170. * @return Pointer to stats struct of this interface.
  1171. */
  1172. static struct net_device_stats *
  1173. netiucv_stats (struct net_device * dev)
  1174. {
  1175. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1176. return &((struct netiucv_priv *)dev->priv)->stats;
  1177. }
  1178. /**
  1179. * Sets MTU of an interface.
  1180. *
  1181. * @param dev Pointer to interface struct.
  1182. * @param new_mtu The new MTU to use for this interface.
  1183. *
  1184. * @return 0 on success, -EINVAL if MTU is out of valid range.
  1185. * (valid range is 576 .. NETIUCV_MTU_MAX).
  1186. */
  1187. static int
  1188. netiucv_change_mtu (struct net_device * dev, int new_mtu)
  1189. {
  1190. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1191. if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
  1192. IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
  1193. return -EINVAL;
  1194. }
  1195. dev->mtu = new_mtu;
  1196. return 0;
  1197. }
  1198. /**
  1199. * attributes in sysfs
  1200. *****************************************************************************/
  1201. static ssize_t
  1202. user_show (struct device *dev, struct device_attribute *attr, char *buf)
  1203. {
  1204. struct netiucv_priv *priv = dev->driver_data;
  1205. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1206. return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
  1207. }
  1208. static ssize_t
  1209. user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1210. {
  1211. struct netiucv_priv *priv = dev->driver_data;
  1212. struct net_device *ndev = priv->conn->netdev;
  1213. char *p;
  1214. char *tmp;
  1215. char username[9];
  1216. int i;
  1217. struct iucv_connection **clist = &iucv_conns.iucv_connections;
  1218. unsigned long flags;
  1219. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1220. if (count>9) {
  1221. PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
  1222. IUCV_DBF_TEXT_(setup, 2,
  1223. "%d is length of username\n", (int)count);
  1224. return -EINVAL;
  1225. }
  1226. tmp = strsep((char **) &buf, "\n");
  1227. for (i=0, p=tmp; i<8 && *p; i++, p++) {
  1228. if (isalnum(*p) || (*p == '$'))
  1229. username[i]= toupper(*p);
  1230. else if (*p == '\n') {
  1231. /* trailing lf, grr */
  1232. break;
  1233. } else {
  1234. PRINT_WARN("netiucv: Invalid char %c in username!\n",
  1235. *p);
  1236. IUCV_DBF_TEXT_(setup, 2,
  1237. "username: invalid character %c\n",
  1238. *p);
  1239. return -EINVAL;
  1240. }
  1241. }
  1242. while (i<8)
  1243. username[i++] = ' ';
  1244. username[8] = '\0';
  1245. if (memcmp(username, priv->conn->userid, 9)) {
  1246. /* username changed */
  1247. if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
  1248. PRINT_WARN(
  1249. "netiucv: device %s active, connected to %s\n",
  1250. dev->bus_id, priv->conn->userid);
  1251. PRINT_WARN("netiucv: user cannot be updated\n");
  1252. IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
  1253. return -EBUSY;
  1254. }
  1255. }
  1256. read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
  1257. while (*clist) {
  1258. if (!strncmp(username, (*clist)->userid, 9) ||
  1259. ((*clist)->netdev != ndev))
  1260. break;
  1261. clist = &((*clist)->next);
  1262. }
  1263. read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
  1264. if (*clist) {
  1265. PRINT_WARN("netiucv: Connection to %s already exists\n",
  1266. username);
  1267. return -EEXIST;
  1268. }
  1269. memcpy(priv->conn->userid, username, 9);
  1270. return count;
  1271. }
  1272. static DEVICE_ATTR(user, 0644, user_show, user_write);
  1273. static ssize_t
  1274. buffer_show (struct device *dev, struct device_attribute *attr, char *buf)
  1275. {
  1276. struct netiucv_priv *priv = dev->driver_data;
  1277. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1278. return sprintf(buf, "%d\n", priv->conn->max_buffsize);
  1279. }
  1280. static ssize_t
  1281. buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  1282. {
  1283. struct netiucv_priv *priv = dev->driver_data;
  1284. struct net_device *ndev = priv->conn->netdev;
  1285. char *e;
  1286. int bs1;
  1287. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1288. if (count >= 39)
  1289. return -EINVAL;
  1290. bs1 = simple_strtoul(buf, &e, 0);
  1291. if (e && (!isspace(*e))) {
  1292. PRINT_WARN("netiucv: Invalid character in buffer!\n");
  1293. IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
  1294. return -EINVAL;
  1295. }
  1296. if (bs1 > NETIUCV_BUFSIZE_MAX) {
  1297. PRINT_WARN("netiucv: Given buffer size %d too large.\n",
  1298. bs1);
  1299. IUCV_DBF_TEXT_(setup, 2,
  1300. "buffer_write: buffer size %d too large\n",
  1301. bs1);
  1302. return -EINVAL;
  1303. }
  1304. if ((ndev->flags & IFF_RUNNING) &&
  1305. (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
  1306. PRINT_WARN("netiucv: Given buffer size %d too small.\n",
  1307. bs1);
  1308. IUCV_DBF_TEXT_(setup, 2,
  1309. "buffer_write: buffer size %d too small\n",
  1310. bs1);
  1311. return -EINVAL;
  1312. }
  1313. if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
  1314. PRINT_WARN("netiucv: Given buffer size %d too small.\n",
  1315. bs1);
  1316. IUCV_DBF_TEXT_(setup, 2,
  1317. "buffer_write: buffer size %d too small\n",
  1318. bs1);
  1319. return -EINVAL;
  1320. }
  1321. priv->conn->max_buffsize = bs1;
  1322. if (!(ndev->flags & IFF_RUNNING))
  1323. ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
  1324. return count;
  1325. }
  1326. static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);

static ssize_t
dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}

static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);

static ssize_t
conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}

static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);

static ssize_t
maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}

static ssize_t
maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
        priv->conn->prof.maxmulti = 0;
        return count;
}

static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);

static ssize_t
maxcq_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}

static ssize_t
maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
        priv->conn->prof.maxcqueue = 0;
        return count;
}

static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);

static ssize_t
sdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}

static ssize_t
sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
        priv->conn->prof.doios_single = 0;
        return count;
}

static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);

static ssize_t
mdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}

static ssize_t
mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        priv->conn->prof.doios_multi = 0;
        return count;
}

static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

static ssize_t
txlen_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t
txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
        priv->conn->prof.txlen = 0;
        return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

static ssize_t
txtime_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

static ssize_t
txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
        priv->conn->prof.tx_time = 0;
        return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

static ssize_t
txpend_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t
txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
        priv->conn->prof.tx_pending = 0;
        return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

static ssize_t
txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
        return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t
txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct netiucv_priv *priv = dev->driver_data;

        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
        priv->conn->prof.tx_max_pending = 0;
        return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
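
/*
 * Note on the statistics attributes above: each *_write handler ignores the
 * value that was written and simply resets its counter in conn->prof to
 * zero, while the matching *_show handler prints the current value.  A
 * sketch of clearing one counter from user space (device path assumed as in
 * the earlier example, "stats" subdirectory created by the group below):
 *
 *   echo 0 > /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 */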

static struct attribute *netiucv_attrs[] = {
        &dev_attr_buffer.attr,
        &dev_attr_user.attr,
        NULL,
};

static struct attribute_group netiucv_attr_group = {
        .attrs = netiucv_attrs,
};

static struct attribute *netiucv_stat_attrs[] = {
        &dev_attr_device_fsm_state.attr,
        &dev_attr_connection_fsm_state.attr,
        &dev_attr_max_tx_buffer_used.attr,
        &dev_attr_max_chained_skbs.attr,
        &dev_attr_tx_single_write_ops.attr,
        &dev_attr_tx_multi_write_ops.attr,
        &dev_attr_netto_bytes.attr,
        &dev_attr_max_tx_io_time.attr,
        &dev_attr_tx_pending.attr,
        &dev_attr_tx_max_pending.attr,
        NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
        .name = "stats",
        .attrs = netiucv_stat_attrs,
};
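
/*
 * The two groups are created per device by netiucv_add_files():
 * netiucv_attr_group (buffer, user) lands directly in the device's sysfs
 * directory, while netiucv_stat_attr_group carries .name = "stats" and so
 * appears as a "stats" subdirectory holding the counters defined above.
 */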

static inline int
netiucv_add_files(struct device *dev)
{
        int ret;

        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
        ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
        if (ret)
                return ret;
        ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
        if (ret)
                sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
        return ret;
}

static inline void
netiucv_remove_files(struct device *dev)
{
        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
        sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
        sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}

static int
netiucv_register_device(struct net_device *ndev)
{
        struct netiucv_priv *priv = ndev->priv;
        struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
        int ret;

        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

        if (dev) {
                snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
                dev->bus = &iucv_bus;
                dev->parent = iucv_root;
                /*
                 * The release function could be called after the
                 * module has been unloaded. Its _only_ task is to
                 * free the struct. Therefore, we specify kfree()
                 * directly here. (Probably a little bit obfuscating
                 * but legitimate ...).
                 */
                dev->release = (void (*)(struct device *))kfree;
                dev->driver = &netiucv_driver;
        } else
                return -ENOMEM;

        ret = device_register(dev);
        if (ret)
                return ret;
        ret = netiucv_add_files(dev);
        if (ret)
                goto out_unreg;
        priv->dev = dev;
        dev->driver_data = priv;
        return 0;

out_unreg:
        device_unregister(dev);
        return ret;
}

static void
netiucv_unregister_device(struct device *dev)
{
        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
        netiucv_remove_files(dev);
        device_unregister(dev);
}

/**
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections.
 */
static struct iucv_connection *
netiucv_new_connection(struct net_device *dev, char *username)
{
        unsigned long flags;
        struct iucv_connection **clist = &iucv_conns.iucv_connections;
        struct iucv_connection *conn =
                kzalloc(sizeof(struct iucv_connection), GFP_KERNEL);

        if (conn) {
                skb_queue_head_init(&conn->collect_queue);
                skb_queue_head_init(&conn->commit_queue);
                spin_lock_init(&conn->collect_lock);
                conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
                conn->netdev = dev;

                conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
                                          GFP_KERNEL | GFP_DMA);
                if (!conn->rx_buff) {
                        kfree(conn);
                        return NULL;
                }
                conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
                                          GFP_KERNEL | GFP_DMA);
                if (!conn->tx_buff) {
                        kfree_skb(conn->rx_buff);
                        kfree(conn);
                        return NULL;
                }
                conn->fsm = init_fsm("netiucvconn", conn_state_names,
                                     conn_event_names, NR_CONN_STATES,
                                     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
                                     GFP_KERNEL);
                if (!conn->fsm) {
                        kfree_skb(conn->tx_buff);
                        kfree_skb(conn->rx_buff);
                        kfree(conn);
                        return NULL;
                }
                fsm_settimer(conn->fsm, &conn->timer);
                fsm_newstate(conn->fsm, CONN_STATE_INVALID);

                if (username) {
                        memcpy(conn->userid, username, 9);
                        fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
                }

                write_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
                conn->next = *clist;
                *clist = conn;
                write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
        }
        return conn;
}
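
/*
 * netiucv_new_connection() links the fresh connection at the head of the
 * global singly linked list iucv_conns.iucv_connections while holding the
 * iucv_rwlock write lock; readers such as conn_write() and remove_write()
 * walk the same list under the read lock.  When a username is supplied,
 * its 8 characters plus terminator are copied verbatim and the connection
 * FSM starts out in CONN_STATE_STOPPED instead of CONN_STATE_INVALID.
 */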

/**
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void
netiucv_remove_connection(struct iucv_connection *conn)
{
        struct iucv_connection **clist = &iucv_conns.iucv_connections;
        unsigned long flags;

        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
        if (conn == NULL)
                return;
        write_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
        while (*clist) {
                if (*clist == conn) {
                        *clist = conn->next;
                        write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
                        if (conn->handle) {
                                iucv_unregister_program(conn->handle);
                                conn->handle = NULL;
                        }
                        fsm_deltimer(&conn->timer);
                        kfree_fsm(conn->fsm);
                        kfree_skb(conn->rx_buff);
                        kfree_skb(conn->tx_buff);
                        return;
                }
                clist = &((*clist)->next);
        }
        write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
}

/**
 * Release everything of a net device.
 */
static void
netiucv_free_netdevice(struct net_device *dev)
{
        struct netiucv_priv *privptr;

        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

        if (!dev)
                return;

        privptr = (struct netiucv_priv *)dev->priv;
        if (privptr) {
                if (privptr->conn)
                        netiucv_remove_connection(privptr->conn);
                if (privptr->fsm)
                        kfree_fsm(privptr->fsm);
                privptr->conn = NULL;
                privptr->fsm = NULL;
                /* privptr gets freed by free_netdev() */
        }
        free_netdev(dev);
}

/**
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static void
netiucv_setup_netdevice(struct net_device *dev)
{
        memset(dev->priv, 0, sizeof(struct netiucv_priv));

        dev->mtu = NETIUCV_MTU_DEFAULT;
        dev->hard_start_xmit = netiucv_tx;
        dev->open = netiucv_open;
        dev->stop = netiucv_close;
        dev->get_stats = netiucv_stats;
        dev->change_mtu = netiucv_change_mtu;
        dev->destructor = netiucv_free_netdevice;
        dev->hard_header_len = NETIUCV_HDRLEN;
        dev->addr_len = 0;
        dev->type = ARPHRD_SLIP;
        dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        SET_MODULE_OWNER(dev);
}
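
/*
 * netiucv_setup_netdevice() configures the interface as a point-to-point,
 * ARP-less device (IFF_POINTOPOINT | IFF_NOARP) of type ARPHRD_SLIP with no
 * link-layer address; only the IUCV header (NETIUCV_HDRLEN) is reserved in
 * front of each packet.
 */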

/**
 * Allocate and initialize everything of a net device.
 */
static struct net_device *
netiucv_init_netdevice(char *username)
{
        struct netiucv_priv *privptr;
        struct net_device *dev;

        dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
                           netiucv_setup_netdevice);
        if (!dev)
                return NULL;
        if (dev_alloc_name(dev, dev->name) < 0) {
                free_netdev(dev);
                return NULL;
        }

        privptr = (struct netiucv_priv *)dev->priv;
        privptr->fsm = init_fsm("netiucvdev", dev_state_names,
                                dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
                                dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
        if (!privptr->fsm) {
                free_netdev(dev);
                return NULL;
        }
        privptr->conn = netiucv_new_connection(dev, username);
        if (!privptr->conn) {
                kfree_fsm(privptr->fsm);
                free_netdev(dev);
                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
                return NULL;
        }
        fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);

        return dev;
}

static ssize_t
conn_write(struct device_driver *drv, const char *buf, size_t count)
{
        char *p;
        char username[9];
        int i, ret;
        struct net_device *dev;
        struct iucv_connection **clist = &iucv_conns.iucv_connections;
        unsigned long flags;

        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
        if (count > 9) {
                PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
                IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
                return -EINVAL;
        }

        for (i = 0, p = (char *)buf; i < 8 && *p; i++, p++) {
                if (isalnum(*p) || (*p == '$'))
                        username[i] = toupper(*p);
                else if (*p == '\n') {
                        /* trailing lf, grr */
                        break;
                } else {
                        PRINT_WARN("netiucv: Invalid character in username!\n");
                        IUCV_DBF_TEXT_(setup, 2,
                                       "conn_write: invalid character %c\n", *p);
                        return -EINVAL;
                }
        }
        while (i < 8)
                username[i++] = ' ';
        username[8] = '\0';

        read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
        while (*clist) {
                if (!strncmp(username, (*clist)->userid, 9))
                        break;
                clist = &((*clist)->next);
        }
        read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
        if (*clist) {
                PRINT_WARN("netiucv: Connection to %s already exists\n",
                           username);
                return -EEXIST;
        }
        dev = netiucv_init_netdevice(username);
        if (!dev) {
                PRINT_WARN("netiucv: Could not allocate network device structure "
                           "for user '%s'\n", netiucv_printname(username));
                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
                return -ENODEV;
        }

        if ((ret = netiucv_register_device(dev))) {
                IUCV_DBF_TEXT_(setup, 2,
                               "ret %d from netiucv_register_device\n", ret);
                goto out_free_ndev;
        }

        /* sysfs magic */
        SET_NETDEV_DEV(dev,
                       (struct device *)((struct netiucv_priv *)dev->priv)->dev);

        if ((ret = register_netdev(dev))) {
                netiucv_unregister_device((struct device *)
                                          ((struct netiucv_priv *)dev->priv)->dev);
                goto out_free_ndev;
        }

        PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));

        return count;

out_free_ndev:
        PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
        IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
        netiucv_free_netdevice(dev);
        return ret;
}

DRIVER_ATTR(connection, 0200, NULL, conn_write);
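
/*
 * Usage sketch for the "connection" driver attribute: conn_write() takes a
 * 1-8 character z/VM user ID, upper-cases it, pads it with blanks, refuses
 * duplicates, and then allocates and registers a new iucv%d interface bound
 * to that peer.  Assuming the driver directory is
 * /sys/bus/iucv/drivers/netiucv (the driver name is defined outside this
 * excerpt), creating a link to guest LNXPEER could look like:
 *
 *   echo LNXPEER > /sys/bus/iucv/drivers/netiucv/connection
 */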

static ssize_t
remove_write (struct device_driver *drv, const char *buf, size_t count)
{
        struct iucv_connection **clist = &iucv_conns.iucv_connections;
        unsigned long flags;
        struct net_device *ndev;
        struct netiucv_priv *priv;
        struct device *dev;
        char name[IFNAMSIZ];
        char *p;
        int i;

        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

        if (count >= IFNAMSIZ)
                count = IFNAMSIZ - 1;

        for (i = 0, p = (char *)buf; i < count && *p; i++, p++) {
                if ((*p == '\n') || (*p == ' ')) {
                        /* trailing lf, grr */
                        break;
                } else {
                        name[i] = *p;
                }
        }
        name[i] = '\0';

        read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
        while (*clist) {
                ndev = (*clist)->netdev;
                priv = (struct netiucv_priv *)ndev->priv;
                dev = priv->dev;

                if (strncmp(name, ndev->name, count)) {
                        clist = &((*clist)->next);
                        continue;
                }
                read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
                if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
                        PRINT_WARN("netiucv: net device %s active with peer %s\n",
                                   ndev->name, priv->conn->userid);
                        PRINT_WARN("netiucv: %s cannot be removed\n",
                                   ndev->name);
                        IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
                        return -EBUSY;
                }
                unregister_netdev(ndev);
                netiucv_unregister_device(dev);
                return count;
        }
        read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
        PRINT_WARN("netiucv: net device %s unknown\n", name);
        IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
        return -EINVAL;
}

DRIVER_ATTR(remove, 0200, NULL, remove_write);
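
/*
 * Usage sketch for the "remove" driver attribute: remove_write() matches
 * the written interface name against the connection list and unregisters
 * both the net device and its bus device, but refuses with -EBUSY while
 * the interface is still UP or RUNNING, so it must be brought down first.
 * Assuming the same driver path as above:
 *
 *   ifconfig iucv0 down
 *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 */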

static void
netiucv_banner(void)
{
        PRINT_INFO("NETIUCV driver initialized\n");
}

static void __exit
netiucv_exit(void)
{
        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
        while (iucv_conns.iucv_connections) {
                struct net_device *ndev = iucv_conns.iucv_connections->netdev;
                struct netiucv_priv *priv = (struct netiucv_priv *)ndev->priv;
                struct device *dev = priv->dev;

                unregister_netdev(ndev);
                netiucv_unregister_device(dev);
        }

        driver_remove_file(&netiucv_driver, &driver_attr_connection);
        driver_remove_file(&netiucv_driver, &driver_attr_remove);
        driver_unregister(&netiucv_driver);
        iucv_unregister_dbf_views();

        PRINT_INFO("NETIUCV driver unloaded\n");
        return;
}

static int __init
netiucv_init(void)
{
        int ret;

        ret = iucv_register_dbf_views();
        if (ret) {
                PRINT_WARN("netiucv_init failed, "
                           "iucv_register_dbf_views rc = %d\n", ret);
                return ret;
        }
        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
        ret = driver_register(&netiucv_driver);
        if (ret) {
                PRINT_ERR("NETIUCV: failed to register driver.\n");
                IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
                iucv_unregister_dbf_views();
                return ret;
        }

        /* Add entry for specifying connections. */
        ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
        if (!ret) {
                ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
                netiucv_banner();
                rwlock_init(&iucv_conns.iucv_rwlock);
        } else {
                PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
                IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
                driver_unregister(&netiucv_driver);
                iucv_unregister_dbf_views();
        }
        return ret;
}

module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");