netiucv.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165
  1. /*
  2. * IUCV network driver
  3. *
  4. * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
  5. * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
  6. *
  7. * Sysfs integration and all bugs therein by Cornelia Huck
  8. * (cornelia.huck@de.ibm.com)
  9. *
  10. * Documentation used:
  11. * the source of the original IUCV driver by:
  12. * Stefan Hegewald <hegewald@de.ibm.com>
  13. * Hartmut Penner <hpenner@de.ibm.com>
  14. * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  15. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  16. * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
  17. *
  18. * This program is free software; you can redistribute it and/or modify
  19. * it under the terms of the GNU General Public License as published by
  20. * the Free Software Foundation; either version 2, or (at your option)
  21. * any later version.
  22. *
  23. * This program is distributed in the hope that it will be useful,
  24. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  25. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  26. * GNU General Public License for more details.
  27. *
  28. * You should have received a copy of the GNU General Public License
  29. * along with this program; if not, write to the Free Software
  30. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  31. *
  32. */
  33. #undef DEBUG
  34. #include <linux/module.h>
  35. #include <linux/init.h>
  36. #include <linux/kernel.h>
  37. #include <linux/slab.h>
  38. #include <linux/errno.h>
  39. #include <linux/types.h>
  40. #include <linux/interrupt.h>
  41. #include <linux/timer.h>
  42. #include <linux/bitops.h>
  43. #include <linux/signal.h>
  44. #include <linux/string.h>
  45. #include <linux/device.h>
  46. #include <linux/ip.h>
  47. #include <linux/if_arp.h>
  48. #include <linux/tcp.h>
  49. #include <linux/skbuff.h>
  50. #include <linux/ctype.h>
  51. #include <net/dst.h>
  52. #include <asm/io.h>
  53. #include <asm/uaccess.h>
  54. #include <net/iucv/iucv.h>
  55. #include "fsm.h"
  56. MODULE_AUTHOR
  57. ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
  58. MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
  59. /**
  60. * Debug Facility stuff
  61. */
  62. #define IUCV_DBF_SETUP_NAME "iucv_setup"
  63. #define IUCV_DBF_SETUP_LEN 32
  64. #define IUCV_DBF_SETUP_PAGES 2
  65. #define IUCV_DBF_SETUP_NR_AREAS 1
  66. #define IUCV_DBF_SETUP_LEVEL 3
  67. #define IUCV_DBF_DATA_NAME "iucv_data"
  68. #define IUCV_DBF_DATA_LEN 128
  69. #define IUCV_DBF_DATA_PAGES 2
  70. #define IUCV_DBF_DATA_NR_AREAS 1
  71. #define IUCV_DBF_DATA_LEVEL 2
  72. #define IUCV_DBF_TRACE_NAME "iucv_trace"
  73. #define IUCV_DBF_TRACE_LEN 16
  74. #define IUCV_DBF_TRACE_PAGES 4
  75. #define IUCV_DBF_TRACE_NR_AREAS 1
  76. #define IUCV_DBF_TRACE_LEVEL 3
  77. #define IUCV_DBF_TEXT(name,level,text) \
  78. do { \
  79. debug_text_event(iucv_dbf_##name,level,text); \
  80. } while (0)
  81. #define IUCV_DBF_HEX(name,level,addr,len) \
  82. do { \
  83. debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
  84. } while (0)
  85. DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
  86. #define IUCV_DBF_TEXT_(name,level,text...) \
  87. do { \
  88. char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
  89. sprintf(iucv_dbf_txt_buf, text); \
  90. debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
  91. put_cpu_var(iucv_dbf_txt_buf); \
  92. } while (0)
  93. #define IUCV_DBF_SPRINTF(name,level,text...) \
  94. do { \
  95. debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
  96. debug_sprintf_event(iucv_dbf_trace, level, text ); \
  97. } while (0)
  98. /**
  99. * some more debug stuff
  100. */
  101. #define IUCV_HEXDUMP16(importance,header,ptr) \
  102. PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
  103. "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
  104. *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
  105. *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
  106. *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
  107. *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
  108. *(((char*)ptr)+12),*(((char*)ptr)+13), \
  109. *(((char*)ptr)+14),*(((char*)ptr)+15)); \
  110. PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
  111. "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
  112. *(((char*)ptr)+16),*(((char*)ptr)+17), \
  113. *(((char*)ptr)+18),*(((char*)ptr)+19), \
  114. *(((char*)ptr)+20),*(((char*)ptr)+21), \
  115. *(((char*)ptr)+22),*(((char*)ptr)+23), \
  116. *(((char*)ptr)+24),*(((char*)ptr)+25), \
  117. *(((char*)ptr)+26),*(((char*)ptr)+27), \
  118. *(((char*)ptr)+28),*(((char*)ptr)+29), \
  119. *(((char*)ptr)+30),*(((char*)ptr)+31));
  120. #define PRINTK_HEADER " iucv: " /* for debugging */
/* Generic driver object that binds netiucv devices to the iucv bus. */
static struct device_driver netiucv_driver = {
	.name = "netiucv",
	.bus  = &iucv_bus,
};

/* Forward declarations of the iucv callback wrappers defined below. */
static int netiucv_callback_connreq(struct iucv_path *,
				    u8 ipvmid[8], u8 ipuser[16]);
static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);

/*
 * Handler registered with the lowlevel iucv layer; each callback just
 * translates the iucv event into a connection-statemachine event.
 */
static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};
/**
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;		/* high-water mark of collect_len */
	unsigned long maxcqueue;	/* max skbs merged into one multi send */
	unsigned long doios_single;	/* count of single-skb sends */
	unsigned long doios_multi;	/* count of collected (multi) sends */
	unsigned long txlen;		/* total bytes given to iucv_message_send */
	unsigned long tx_time;		/* tx time accounting — unit not visible
					 * in this chunk; TODO confirm */
	struct timespec send_stamp;	/* timestamp of the last send */
	unsigned long tx_pending;	/* sends awaiting CONN_EVENT_TXDONE */
	unsigned long tx_max_pending;	/* high-water mark of tx_pending */
};
/**
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;		/* entry in iucv_connection_list */
	struct iucv_path	  *path;	/* lowlevel iucv path, set on accept */
	struct sk_buff		  *rx_buff;	/* reusable receive staging buffer */
	struct sk_buff		  *tx_buff;	/* reusable transmit staging buffer */
	struct sk_buff_head	  collect_queue; /* skbs waiting to be merged/sent */
	struct sk_buff_head	  commit_queue;	/* skbs sent, awaiting TXDONE */
	spinlock_t		  collect_lock;	/* protects collect_queue/collect_len */
	int			  collect_len;	/* bytes currently in collect_queue */
	int			  max_buffsize; /* max accepted message length */
	fsm_timer		  timer;	/* connection setup timeout timer */
	fsm_instance		  *fsm;		/* connection statemachine */
	struct net_device	  *netdev;	/* interface this connection serves */
	struct connection_profile prof;		/* statistics, see above */
	char			  userid[9];	/* peer VM userid, NUL-terminated */
};
/**
 * Linked list of all connection structs.
 */
static struct list_head iucv_connection_list =
	LIST_HEAD_INIT(iucv_connection_list);
/* Protects iucv_connection_list; readers take it _bh (see
 * netiucv_callback_connreq) — NOTE(review): presumably because iucv
 * callbacks run in softirq context; confirm against iucv.c. */
static DEFINE_RWLOCK(iucv_connection_rwlock);

/**
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;	/* connection the event belongs to */
	void *data;			/* event payload (path or message) */
};

/**
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;	/* interface statistics */
	unsigned long		tbusy;	/* bit 0: tx busy flag (see
					 * netiucv_test_and_set_busy) */
	fsm_instance		*fsm;	/* interface statemachine */
	struct iucv_connection	*conn;	/* connection backing this device */
	struct device		*dev;	/* sysfs device */
};

/**
 * Link level header for a packet.
 */
struct ll_header {
	u16 next;	/* offset of the next header within the buffer;
			 * 0 terminates the chain */
};

#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 32768
#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX		 (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT	 9216
#define NETIUCV_QUEUELEN_DEFAULT 50	/* default iucv path msglim */
#define NETIUCV_TIMEOUT_5SEC	 5000	/* connection setup timeout, ms */
  212. /**
  213. * Compatibility macros for busy handling
  214. * of network devices.
  215. */
  216. static inline void netiucv_clear_busy(struct net_device *dev)
  217. {
  218. struct netiucv_priv *priv = netdev_priv(dev);
  219. clear_bit(0, &priv->tbusy);
  220. netif_wake_queue(dev);
  221. }
  222. static inline int netiucv_test_and_set_busy(struct net_device *dev)
  223. {
  224. struct netiucv_priv *priv = netdev_priv(dev);
  225. netif_stop_queue(dev);
  226. return test_and_set_bit(0, &priv->tbusy);
  227. }
/*
 * 16 bytes of user data sent with every connection request; peers must
 * present exactly this pattern (compared in netiucv_callback_connreq).
 * 0xF0/0x40 are EBCDIC '0' and ' ' respectively.
 */
static u8 iucvMagic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
  232. /**
  233. * Convert an iucv userId to its printable
  234. * form (strip whitespace at end).
  235. *
  236. * @param An iucv userId
  237. *
  238. * @returns The printable string (static data!!)
  239. */
  240. static char *netiucv_printname(char *name)
  241. {
  242. static char tmp[9];
  243. char *p = tmp;
  244. memcpy(tmp, name, 8);
  245. tmp[8] = '\0';
  246. while (*p && (!isspace(*p)))
  247. p++;
  248. *p = '\0';
  249. return tmp;
  250. }
/**
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT,
	DEV_STATE_STOPWAIT,
	DEV_STATE_RUNNING,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_STATES
};

/* Printable names, indexed by enum dev_states. */
static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};

/**
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_CONUP,	/* underlying connection came up */
	DEV_EVENT_CONDOWN,	/* underlying connection went down */
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_EVENTS
};

/* Printable names, indexed by enum dev_events. */
static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};
/**
 * Events of the connection statemachine
 */
enum conn_events {
	/**
	 * Events, representing callbacks from
	 * lowlevel iucv layer)
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,
	/**
	 * Events, representing errors return codes from
	 * calls to lowlevel iucv layer
	 */
	/**
	 * Event, representing timer expiry.
	 */
	CONN_EVENT_TIMER,
	/**
	 * Events, representing commands from upper levels.
	 */
	CONN_EVENT_START,
	CONN_EVENT_STOP,
	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_EVENTS,
};

/* Printable names, indexed by enum conn_events. */
static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",
	"Timer",
	"Start",
	"Stop",
};
/**
 * States of the connection statemachine.
 */
enum conn_states {
	/**
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,
	/**
	 * Userid assigned but not operating
	 */
	CONN_STATE_STOPPED,
	/**
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,
	/**
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,
	/**
	 * Connection up and running idle
	 */
	CONN_STATE_IDLE,
	/**
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */
	CONN_STATE_TX,
	/**
	 * Error during registration.
	 */
	CONN_STATE_REGERR,
	/**
	 * Error during connection setup.
	 * (original comment was a copy-paste of the REGERR text)
	 */
	CONN_STATE_CONNERR,
	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_STATES,
};
  379. static const char *conn_state_names[] = {
  380. "Invalid",
  381. "Stopped",
  382. "StartWait",
  383. "SetupWait",
  384. "Idle",
  385. "TX",
  386. "Terminating",
  387. "Registration error",
  388. "Connect error",
  389. };
/**
 * Debug Facility Stuff
 *
 * Handles for the three s390 debug feature areas; allocated in
 * iucv_register_dbf_views(), released in iucv_unregister_dbf_views().
 */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

/* Definition of the per-CPU buffer declared for IUCV_DBF_TEXT_ above. */
DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
  397. static void iucv_unregister_dbf_views(void)
  398. {
  399. if (iucv_dbf_setup)
  400. debug_unregister(iucv_dbf_setup);
  401. if (iucv_dbf_data)
  402. debug_unregister(iucv_dbf_data);
  403. if (iucv_dbf_trace)
  404. debug_unregister(iucv_dbf_trace);
  405. }
  406. static int iucv_register_dbf_views(void)
  407. {
  408. iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
  409. IUCV_DBF_SETUP_PAGES,
  410. IUCV_DBF_SETUP_NR_AREAS,
  411. IUCV_DBF_SETUP_LEN);
  412. iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
  413. IUCV_DBF_DATA_PAGES,
  414. IUCV_DBF_DATA_NR_AREAS,
  415. IUCV_DBF_DATA_LEN);
  416. iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
  417. IUCV_DBF_TRACE_PAGES,
  418. IUCV_DBF_TRACE_NR_AREAS,
  419. IUCV_DBF_TRACE_LEN);
  420. if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
  421. (iucv_dbf_trace == NULL)) {
  422. iucv_unregister_dbf_views();
  423. return -ENOMEM;
  424. }
  425. debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
  426. debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
  427. debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
  428. debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
  429. debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
  430. debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
  431. return 0;
  432. }
  433. /*
  434. * Callback-wrappers, called from lowlevel iucv layer.
  435. */
  436. static void netiucv_callback_rx(struct iucv_path *path,
  437. struct iucv_message *msg)
  438. {
  439. struct iucv_connection *conn = path->private;
  440. struct iucv_event ev;
  441. ev.conn = conn;
  442. ev.data = msg;
  443. fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
  444. }
  445. static void netiucv_callback_txdone(struct iucv_path *path,
  446. struct iucv_message *msg)
  447. {
  448. struct iucv_connection *conn = path->private;
  449. struct iucv_event ev;
  450. ev.conn = conn;
  451. ev.data = msg;
  452. fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
  453. }
  454. static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
  455. {
  456. struct iucv_connection *conn = path->private;
  457. fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
  458. }
  459. static int netiucv_callback_connreq(struct iucv_path *path,
  460. u8 ipvmid[8], u8 ipuser[16])
  461. {
  462. struct iucv_connection *conn = path->private;
  463. struct iucv_event ev;
  464. int rc;
  465. if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
  466. /* ipuser must match iucvMagic. */
  467. return -EINVAL;
  468. rc = -EINVAL;
  469. read_lock_bh(&iucv_connection_rwlock);
  470. list_for_each_entry(conn, &iucv_connection_list, list) {
  471. if (strncmp(ipvmid, conn->userid, 8))
  472. continue;
  473. /* Found a matching connection for this path. */
  474. conn->path = path;
  475. ev.conn = conn;
  476. ev.data = path;
  477. fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
  478. rc = 0;
  479. }
  480. read_unlock_bh(&iucv_connection_rwlock);
  481. return rc;
  482. }
  483. static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
  484. {
  485. struct iucv_connection *conn = path->private;
  486. fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
  487. }
  488. static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
  489. {
  490. struct iucv_connection *conn = path->private;
  491. fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
  492. }
  493. static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
  494. {
  495. struct iucv_connection *conn = path->private;
  496. fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
  497. }
/**
 * Dummy NOP action for all statemachines: used for state/event
 * combinations that intentionally require no work.
 */
static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
 * Actions of the connection statemachine
 */

/**
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 *
 * The receive buffer holds a chain of packets, each preceded by an
 * ll_header whose 'next' field is the offset of the NEXT header from
 * the start of the buffer; a 'next' of 0 terminates the chain.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device *dev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = ntohs(ETH_P_IP);
	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		/* next == 0 marks the end of the packet chain. */
		if (!header->next)
			break;
		skb_pull(pskb, NETIUCV_HDRLEN);
		/* Convert the absolute offset of the next header into the
		 * length of this packet's payload. */
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			PRINT_WARN("%s: Illegal next field in iucv header: "
			       "%d > %d\n",
			       dev->name, header->next, skb_tailroom(pskb));
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		/* Copy the packet into a freshly allocated skb for the
		 * stack; pskb itself is reused for the next message. */
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
			       dev->name);
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): this sets ip_summed on pskb (the staging
		 * buffer), not on the skb passed up the stack — looks
		 * suspicious, but matches the historical behavior; confirm
		 * before changing. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		dev->last_rx = jiffies;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/* Advance past this packet's payload and re-expose the next
		 * header for the following loop iteration. */
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
  571. static void conn_action_rx(fsm_instance *fi, int event, void *arg)
  572. {
  573. struct iucv_event *ev = arg;
  574. struct iucv_connection *conn = ev->conn;
  575. struct iucv_message *msg = ev->data;
  576. struct netiucv_priv *privptr = netdev_priv(conn->netdev);
  577. int rc;
  578. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  579. if (!conn->netdev) {
  580. iucv_message_reject(conn->path, msg);
  581. PRINT_WARN("Received data for unlinked connection\n");
  582. IUCV_DBF_TEXT(data, 2,
  583. "Received data for unlinked connection\n");
  584. return;
  585. }
  586. if (msg->length > conn->max_buffsize) {
  587. iucv_message_reject(conn->path, msg);
  588. privptr->stats.rx_dropped++;
  589. PRINT_WARN("msglen %d > max_buffsize %d\n",
  590. msg->length, conn->max_buffsize);
  591. IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
  592. msg->length, conn->max_buffsize);
  593. return;
  594. }
  595. conn->rx_buff->data = conn->rx_buff->head;
  596. skb_reset_tail_pointer(conn->rx_buff);
  597. conn->rx_buff->len = 0;
  598. rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
  599. msg->length, NULL);
  600. if (rc || msg->length < 5) {
  601. privptr->stats.rx_errors++;
  602. PRINT_WARN("iucv_receive returned %08x\n", rc);
  603. IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
  604. return;
  605. }
  606. netiucv_unpack_skb(conn, conn->rx_buff);
  607. }
/**
 * Handle CONN_EVENT_TXDONE: account the completed send, then flush any
 * skbs that accumulated in collect_queue while the previous send was in
 * flight as one merged multi-packet buffer.
 */
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	/* msg->tag != 0 marks a single-skb send (queued on commit_queue). */
	u32 single_flag = msg->tag;
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
	/* NOTE(review): conn is NULL-checked here but dereferenced
	 * unconditionally just below — either the check or the
	 * dereference is wrong; confirm whether conn can be NULL. */
	if (conn && conn->netdev)
		privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			dev_kfree_skb_any(skb);
			/* NOTE(review): skb->len is read after
			 * dev_kfree_skb_any(); safe only while the extra
			 * reference taken at xmit time keeps the skb alive —
			 * verify the refcounting in the tx path. */
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
					 - NETIUCV_HDRLEN);
			}
		}
	}
	/* Reset the staging buffer, then merge all collected skbs into it,
	 * each prefixed with an ll_header (next = offset of next header). */
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	/* Nothing collected: the connection goes idle. */
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}
	/* Terminate the packet chain with a zero 'next' header. */
	header.next = 0;
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = current_kernel_time();
	txmsg.class = 0;
	txmsg.tag = 0;	/* tag 0: multi send, no commit_queue entry */
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		PRINT_WARN("iucv_send returned %08x\n", rc);
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}
  692. static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
  693. {
  694. struct iucv_event *ev = arg;
  695. struct iucv_connection *conn = ev->conn;
  696. struct iucv_path *path = ev->data;
  697. struct net_device *netdev = conn->netdev;
  698. struct netiucv_priv *privptr = netdev_priv(netdev);
  699. int rc;
  700. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  701. conn->path = path;
  702. path->msglim = NETIUCV_QUEUELEN_DEFAULT;
  703. path->flags = 0;
  704. rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
  705. if (rc) {
  706. PRINT_WARN("%s: IUCV accept failed with error %d\n",
  707. netdev->name, rc);
  708. IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
  709. return;
  710. }
  711. fsm_newstate(fi, CONN_STATE_IDLE);
  712. netdev->tx_queue_len = conn->path->msglim;
  713. fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
  714. }
  715. static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
  716. {
  717. struct iucv_event *ev = arg;
  718. struct iucv_path *path = ev->data;
  719. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  720. iucv_path_sever(path, NULL);
  721. }
  722. static void conn_action_connack(fsm_instance *fi, int event, void *arg)
  723. {
  724. struct iucv_connection *conn = arg;
  725. struct net_device *netdev = conn->netdev;
  726. struct netiucv_priv *privptr = netdev_priv(netdev);
  727. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  728. fsm_deltimer(&conn->timer);
  729. fsm_newstate(fi, CONN_STATE_IDLE);
  730. netdev->tx_queue_len = conn->path->msglim;
  731. fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
  732. }
  733. static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
  734. {
  735. struct iucv_connection *conn = arg;
  736. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  737. fsm_deltimer(&conn->timer);
  738. iucv_path_sever(conn->path, NULL);
  739. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  740. }
  741. static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
  742. {
  743. struct iucv_connection *conn = arg;
  744. struct net_device *netdev = conn->netdev;
  745. struct netiucv_priv *privptr = netdev_priv(netdev);
  746. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  747. fsm_deltimer(&conn->timer);
  748. iucv_path_sever(conn->path, NULL);
  749. PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
  750. IUCV_DBF_TEXT(data, 2,
  751. "conn_action_connsever: Remote dropped connection\n");
  752. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  753. fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
  754. }
  755. static void conn_action_start(fsm_instance *fi, int event, void *arg)
  756. {
  757. struct iucv_connection *conn = arg;
  758. int rc;
  759. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  760. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  761. PRINT_DEBUG("%s('%s'): connecting ...\n",
  762. conn->netdev->name, conn->userid);
  763. /*
  764. * We must set the state before calling iucv_connect because the
  765. * callback handler could be called at any point after the connection
  766. * request is sent
  767. */
  768. fsm_newstate(fi, CONN_STATE_SETUPWAIT);
  769. conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
  770. rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
  771. NULL, iucvMagic, conn);
  772. switch (rc) {
  773. case 0:
  774. conn->netdev->tx_queue_len = conn->path->msglim;
  775. fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
  776. CONN_EVENT_TIMER, conn);
  777. return;
  778. case 11:
  779. PRINT_INFO("%s: User %s is currently not available.\n",
  780. conn->netdev->name,
  781. netiucv_printname(conn->userid));
  782. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  783. break;
  784. case 12:
  785. PRINT_INFO("%s: User %s is currently not ready.\n",
  786. conn->netdev->name,
  787. netiucv_printname(conn->userid));
  788. fsm_newstate(fi, CONN_STATE_STARTWAIT);
  789. break;
  790. case 13:
  791. PRINT_WARN("%s: Too many IUCV connections.\n",
  792. conn->netdev->name);
  793. fsm_newstate(fi, CONN_STATE_CONNERR);
  794. break;
  795. case 14:
  796. PRINT_WARN("%s: User %s has too many IUCV connections.\n",
  797. conn->netdev->name,
  798. netiucv_printname(conn->userid));
  799. fsm_newstate(fi, CONN_STATE_CONNERR);
  800. break;
  801. case 15:
  802. PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
  803. conn->netdev->name);
  804. fsm_newstate(fi, CONN_STATE_CONNERR);
  805. break;
  806. default:
  807. PRINT_WARN("%s: iucv_connect returned error %d\n",
  808. conn->netdev->name, rc);
  809. fsm_newstate(fi, CONN_STATE_CONNERR);
  810. break;
  811. }
  812. IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
  813. kfree(conn->path);
  814. conn->path = NULL;
  815. }
  816. static void netiucv_purge_skb_queue(struct sk_buff_head *q)
  817. {
  818. struct sk_buff *skb;
  819. while ((skb = skb_dequeue(q))) {
  820. atomic_dec(&skb->users);
  821. dev_kfree_skb_any(skb);
  822. }
  823. }
  824. static void conn_action_stop(fsm_instance *fi, int event, void *arg)
  825. {
  826. struct iucv_event *ev = arg;
  827. struct iucv_connection *conn = ev->conn;
  828. struct net_device *netdev = conn->netdev;
  829. struct netiucv_priv *privptr = netdev_priv(netdev);
  830. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  831. fsm_deltimer(&conn->timer);
  832. fsm_newstate(fi, CONN_STATE_STOPPED);
  833. netiucv_purge_skb_queue(&conn->collect_queue);
  834. if (conn->path) {
  835. IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
  836. iucv_path_sever(conn->path, iucvMagic);
  837. kfree(conn->path);
  838. conn->path = NULL;
  839. }
  840. netiucv_purge_skb_queue(&conn->commit_queue);
  841. fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
  842. }
  843. static void conn_action_inval(fsm_instance *fi, int event, void *arg)
  844. {
  845. struct iucv_connection *conn = arg;
  846. struct net_device *netdev = conn->netdev;
  847. PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
  848. IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
  849. }
  850. static const fsm_node conn_fsm[] = {
  851. { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
  852. { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
  853. { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
  854. { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
  855. { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
  856. { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
  857. { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
  858. { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
  859. { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
  860. { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
  861. { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
  862. { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
  863. { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
  864. { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
  865. { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
  866. { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
  867. { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
  868. { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
  869. { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
  870. { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
  871. { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
  872. { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
  873. { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
  874. };
  875. static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
  876. /*
  877. * Actions for interface - statemachine.
  878. */
  879. /**
  880. * dev_action_start
  881. * @fi: An instance of an interface statemachine.
  882. * @event: The event, just happened.
  883. * @arg: Generic pointer, casted from struct net_device * upon call.
  884. *
  885. * Startup connection by sending CONN_EVENT_START to it.
  886. */
  887. static void dev_action_start(fsm_instance *fi, int event, void *arg)
  888. {
  889. struct net_device *dev = arg;
  890. struct netiucv_priv *privptr = netdev_priv(dev);
  891. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  892. fsm_newstate(fi, DEV_STATE_STARTWAIT);
  893. fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
  894. }
  895. /**
  896. * Shutdown connection by sending CONN_EVENT_STOP to it.
  897. *
  898. * @param fi An instance of an interface statemachine.
  899. * @param event The event, just happened.
  900. * @param arg Generic pointer, casted from struct net_device * upon call.
  901. */
  902. static void
  903. dev_action_stop(fsm_instance *fi, int event, void *arg)
  904. {
  905. struct net_device *dev = arg;
  906. struct netiucv_priv *privptr = netdev_priv(dev);
  907. struct iucv_event ev;
  908. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  909. ev.conn = privptr->conn;
  910. fsm_newstate(fi, DEV_STATE_STOPWAIT);
  911. fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
  912. }
  913. /**
  914. * Called from connection statemachine
  915. * when a connection is up and running.
  916. *
  917. * @param fi An instance of an interface statemachine.
  918. * @param event The event, just happened.
  919. * @param arg Generic pointer, casted from struct net_device * upon call.
  920. */
  921. static void
  922. dev_action_connup(fsm_instance *fi, int event, void *arg)
  923. {
  924. struct net_device *dev = arg;
  925. struct netiucv_priv *privptr = netdev_priv(dev);
  926. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  927. switch (fsm_getstate(fi)) {
  928. case DEV_STATE_STARTWAIT:
  929. fsm_newstate(fi, DEV_STATE_RUNNING);
  930. PRINT_INFO("%s: connected with remote side %s\n",
  931. dev->name, privptr->conn->userid);
  932. IUCV_DBF_TEXT(setup, 3,
  933. "connection is up and running\n");
  934. break;
  935. case DEV_STATE_STOPWAIT:
  936. PRINT_INFO(
  937. "%s: got connection UP event during shutdown!\n",
  938. dev->name);
  939. IUCV_DBF_TEXT(data, 2,
  940. "dev_action_connup: in DEV_STATE_STOPWAIT\n");
  941. break;
  942. }
  943. }
  944. /**
  945. * Called from connection statemachine
  946. * when a connection has been shutdown.
  947. *
  948. * @param fi An instance of an interface statemachine.
  949. * @param event The event, just happened.
  950. * @param arg Generic pointer, casted from struct net_device * upon call.
  951. */
  952. static void
  953. dev_action_conndown(fsm_instance *fi, int event, void *arg)
  954. {
  955. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  956. switch (fsm_getstate(fi)) {
  957. case DEV_STATE_RUNNING:
  958. fsm_newstate(fi, DEV_STATE_STARTWAIT);
  959. break;
  960. case DEV_STATE_STOPWAIT:
  961. fsm_newstate(fi, DEV_STATE_STOPPED);
  962. IUCV_DBF_TEXT(setup, 3, "connection is down\n");
  963. break;
  964. }
  965. }
  966. static const fsm_node dev_fsm[] = {
  967. { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
  968. { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
  969. { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
  970. { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
  971. { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
  972. { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
  973. { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
  974. { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
  975. };
  976. static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
/**
 * Transmit a packet.
 * This is a helper function for netiucv_tx().
 *
 * @param conn Connection to be used for sending.
 * @param skb Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by netiucv_tx().
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_transmit_skb(struct iucv_connection *conn,
				struct sk_buff *skb)
{
	struct iucv_message msg;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		/* A transmit is already in flight: stash the skb on the
		 * collect queue so it can go out in one multi-packet buffer
		 * when the pending send completes. */
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			/* No room left in the collect buffer. */
			rc = -EBUSY;
			IUCV_DBF_TEXT(data, 2,
				      "EBUSY from netiucv_transmit_skb\n");
		} else {
			/* Take a reference of our own while it is queued. */
			atomic_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
			rc = 0;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/**
		 * Copy the skb to a new allocated skb in lowmem only if the
		 * data is located above 2G in memory or tailroom is < 2.
		 */
		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
				   NETIUCV_HDRLEN)) >> 31;
		int copied = 0;

		if (hi || (skb_tailroom(skb) < 2)) {
			/* GFP_DMA keeps the buffer below 2G for IUCV. */
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				PRINT_WARN("%s: Could not allocate tx_skb\n",
					   conn->netdev->name);
				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
				rc = -ENOMEM;
				return rc;
			} else {
				skb_reserve(nskb, NETIUCV_HDRLEN);
				memcpy(skb_put(nskb, skb->len),
				       skb->data, skb->len);
			}
			copied = 1;
		}
		/**
		 * skb now is below 2G and has enough room. Add headers.
		 */
		/* The leading ll_header carries the offset of the next
		 * packet; a trailing header with next == 0 terminates
		 * the chain inside the buffer. */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		header.next = 0;
		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = current_kernel_time();
		msg.tag = 1;
		msg.class = 0;
		rc = iucv_message_send(conn->path, &msg, 0, 0,
				       nskb->data, nskb->len);
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			/* Send failed: undo the accounting and free or
			 * restore the skb depending on whether we copied. */
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = netdev_priv(conn->netdev);
			if (privptr)
				privptr->stats.tx_errors++;
			if (copied)
				dev_kfree_skb(nskb);
			else {
				/**
				 * Remove our headers. They get added
				 * again on retransmit.
				 */
				skb_pull(skb, NETIUCV_HDRLEN);
				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			}
			PRINT_WARN("iucv_send returned %08x\n", rc);
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			/* Hold nskb on the commit queue until the TXDONE
			 * callback releases it. */
			if (copied)
				dev_kfree_skb(skb);
			atomic_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}
	return rc;
}
  1081. /*
  1082. * Interface API for upper network layers
  1083. */
  1084. /**
  1085. * Open an interface.
  1086. * Called from generic network layer when ifconfig up is run.
  1087. *
  1088. * @param dev Pointer to interface struct.
  1089. *
  1090. * @return 0 on success, -ERRNO on failure. (Never fails.)
  1091. */
  1092. static int netiucv_open(struct net_device *dev)
  1093. {
  1094. struct netiucv_priv *priv = netdev_priv(dev);
  1095. fsm_event(priv->fsm, DEV_EVENT_START, dev);
  1096. return 0;
  1097. }
  1098. /**
  1099. * Close an interface.
  1100. * Called from generic network layer when ifconfig down is run.
  1101. *
  1102. * @param dev Pointer to interface struct.
  1103. *
  1104. * @return 0 on success, -ERRNO on failure. (Never fails.)
  1105. */
  1106. static int netiucv_close(struct net_device *dev)
  1107. {
  1108. struct netiucv_priv *priv = netdev_priv(dev);
  1109. fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
  1110. return 0;
  1111. }
/**
 * Start transmission of a packet.
 * Called from generic network device layer.
 *
 * @param skb Pointer to buffer containing the packet.
 * @param dev Pointer to interface struct.
 *
 * @return 0 if packet consumed, !0 if packet rejected.
 *         Note: If we return !0, then the packet is free'd by
 *               the generic network layer.
 */
static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
	/**
	 * Some sanity checks ...
	 */
	if (skb == NULL) {
		PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
		privptr->stats.tx_dropped++;
		return 0;
	}
	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
		/* Not enough headroom for the ll_header we must prepend. */
		PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
			   dev->name, NETIUCV_HDRLEN);
		IUCV_DBF_TEXT(data, 2,
			      "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		return 0;
	}
	/**
	 * If connection is not running, try to restart it
	 * and throw away packet.
	 */
	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
		/* Restart only from a context where the FSM may sleep. */
		if (!in_atomic())
			fsm_event(privptr->fsm, DEV_EVENT_START, dev);
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		privptr->stats.tx_carrier_errors++;
		return 0;
	}
	/* Serialize transmits: only one netiucv_tx runs per device. */
	if (netiucv_test_and_set_busy(dev)) {
		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
		return -EBUSY;
	}
	dev->trans_start = jiffies;
	rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
	netiucv_clear_busy(dev);
	return rc;
}
  1168. /**
  1169. * netiucv_stats
  1170. * @dev: Pointer to interface struct.
  1171. *
  1172. * Returns interface statistics of a device.
  1173. *
  1174. * Returns pointer to stats struct of this interface.
  1175. */
  1176. static struct net_device_stats *netiucv_stats (struct net_device * dev)
  1177. {
  1178. struct netiucv_priv *priv = netdev_priv(dev);
  1179. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1180. return &priv->stats;
  1181. }
  1182. /**
  1183. * netiucv_change_mtu
  1184. * @dev: Pointer to interface struct.
  1185. * @new_mtu: The new MTU to use for this interface.
  1186. *
  1187. * Sets MTU of an interface.
  1188. *
  1189. * Returns 0 on success, -EINVAL if MTU is out of valid range.
  1190. * (valid range is 576 .. NETIUCV_MTU_MAX).
  1191. */
  1192. static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
  1193. {
  1194. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1195. if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
  1196. IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
  1197. return -EINVAL;
  1198. }
  1199. dev->mtu = new_mtu;
  1200. return 0;
  1201. }
  1202. /*
  1203. * attributes in sysfs
  1204. */
  1205. static ssize_t user_show(struct device *dev, struct device_attribute *attr,
  1206. char *buf)
  1207. {
  1208. struct netiucv_priv *priv = dev->driver_data;
  1209. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1210. return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
  1211. }
/* sysfs "user": set the peer z/VM user id for this connection.
 * Rejects names longer than 8 characters, names containing characters
 * other than alphanumerics and '$', changes while the interface is
 * active, and names already used by another connection. */
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char *p;
	char *tmp;
	char username[9];		/* 8 chars, blank-padded, + NUL */
	int i;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	if (count > 9) {
		PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
		IUCV_DBF_TEXT_(setup, 2,
			       "%d is length of username\n", (int) count);
		return -EINVAL;
	}
	tmp = strsep((char **) &buf, "\n");
	/* Copy up to 8 characters, upper-casing as we go; only
	 * alphanumerics and '$' are valid in a VM user id. */
	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || (*p == '$')) {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n') {
			/* trailing lf, grr */
			break;
		}
		PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
		IUCV_DBF_TEXT_(setup, 2,
			       "username: invalid character %c\n", *p);
		return -EINVAL;
	}
	/* Blank-pad to the fixed 8-character VM user id format. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';
	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		PRINT_WARN("netiucv: device %s active, connected to %s\n",
			   dev->bus_id, priv->conn->userid);
		PRINT_WARN("netiucv: user cannot be updated\n");
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EBUSY;
	}
	/* Refuse a user id that another netiucv device already uses. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			PRINT_WARN("netiucv: Connection to %s already "
				   "exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
  1270. static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
  1271. char *buf)
  1272. { struct netiucv_priv *priv = dev->driver_data;
  1273. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1274. return sprintf(buf, "%d\n", priv->conn->max_buffsize);
  1275. }
  1276. static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
  1277. const char *buf, size_t count)
  1278. {
  1279. struct netiucv_priv *priv = dev->driver_data;
  1280. struct net_device *ndev = priv->conn->netdev;
  1281. char *e;
  1282. int bs1;
  1283. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1284. if (count >= 39)
  1285. return -EINVAL;
  1286. bs1 = simple_strtoul(buf, &e, 0);
  1287. if (e && (!isspace(*e))) {
  1288. PRINT_WARN("netiucv: Invalid character in buffer!\n");
  1289. IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
  1290. return -EINVAL;
  1291. }
  1292. if (bs1 > NETIUCV_BUFSIZE_MAX) {
  1293. PRINT_WARN("netiucv: Given buffer size %d too large.\n",
  1294. bs1);
  1295. IUCV_DBF_TEXT_(setup, 2,
  1296. "buffer_write: buffer size %d too large\n",
  1297. bs1);
  1298. return -EINVAL;
  1299. }
  1300. if ((ndev->flags & IFF_RUNNING) &&
  1301. (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
  1302. PRINT_WARN("netiucv: Given buffer size %d too small.\n",
  1303. bs1);
  1304. IUCV_DBF_TEXT_(setup, 2,
  1305. "buffer_write: buffer size %d too small\n",
  1306. bs1);
  1307. return -EINVAL;
  1308. }
  1309. if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
  1310. PRINT_WARN("netiucv: Given buffer size %d too small.\n",
  1311. bs1);
  1312. IUCV_DBF_TEXT_(setup, 2,
  1313. "buffer_write: buffer size %d too small\n",
  1314. bs1);
  1315. return -EINVAL;
  1316. }
  1317. priv->conn->max_buffsize = bs1;
  1318. if (!(ndev->flags & IFF_RUNNING))
  1319. ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
  1320. return count;
  1321. }
  1322. static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
  1323. static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
  1324. char *buf)
  1325. {
  1326. struct netiucv_priv *priv = dev->driver_data;
  1327. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1328. return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
  1329. }
  1330. static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
  1331. static ssize_t conn_fsm_show (struct device *dev,
  1332. struct device_attribute *attr, char *buf)
  1333. {
  1334. struct netiucv_priv *priv = dev->driver_data;
  1335. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1336. return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
  1337. }
  1338. static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
  1339. static ssize_t maxmulti_show (struct device *dev,
  1340. struct device_attribute *attr, char *buf)
  1341. {
  1342. struct netiucv_priv *priv = dev->driver_data;
  1343. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1344. return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
  1345. }
  1346. static ssize_t maxmulti_write (struct device *dev,
  1347. struct device_attribute *attr,
  1348. const char *buf, size_t count)
  1349. {
  1350. struct netiucv_priv *priv = dev->driver_data;
  1351. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1352. priv->conn->prof.maxmulti = 0;
  1353. return count;
  1354. }
  1355. static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
  1356. static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
  1357. char *buf)
  1358. {
  1359. struct netiucv_priv *priv = dev->driver_data;
  1360. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1361. return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
  1362. }
  1363. static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
  1364. const char *buf, size_t count)
  1365. {
  1366. struct netiucv_priv *priv = dev->driver_data;
  1367. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1368. priv->conn->prof.maxcqueue = 0;
  1369. return count;
  1370. }
  1371. static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
  1372. static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
  1373. char *buf)
  1374. {
  1375. struct netiucv_priv *priv = dev->driver_data;
  1376. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1377. return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
  1378. }
  1379. static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
  1380. const char *buf, size_t count)
  1381. {
  1382. struct netiucv_priv *priv = dev->driver_data;
  1383. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1384. priv->conn->prof.doios_single = 0;
  1385. return count;
  1386. }
  1387. static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
  1388. static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
  1389. char *buf)
  1390. {
  1391. struct netiucv_priv *priv = dev->driver_data;
  1392. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1393. return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
  1394. }
  1395. static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
  1396. const char *buf, size_t count)
  1397. {
  1398. struct netiucv_priv *priv = dev->driver_data;
  1399. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1400. priv->conn->prof.doios_multi = 0;
  1401. return count;
  1402. }
  1403. static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
  1404. static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
  1405. char *buf)
  1406. {
  1407. struct netiucv_priv *priv = dev->driver_data;
  1408. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1409. return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
  1410. }
  1411. static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
  1412. const char *buf, size_t count)
  1413. {
  1414. struct netiucv_priv *priv = dev->driver_data;
  1415. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1416. priv->conn->prof.txlen = 0;
  1417. return count;
  1418. }
  1419. static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
  1420. static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
  1421. char *buf)
  1422. {
  1423. struct netiucv_priv *priv = dev->driver_data;
  1424. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1425. return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
  1426. }
  1427. static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
  1428. const char *buf, size_t count)
  1429. {
  1430. struct netiucv_priv *priv = dev->driver_data;
  1431. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1432. priv->conn->prof.tx_time = 0;
  1433. return count;
  1434. }
  1435. static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
  1436. static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
  1437. char *buf)
  1438. {
  1439. struct netiucv_priv *priv = dev->driver_data;
  1440. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1441. return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
  1442. }
  1443. static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
  1444. const char *buf, size_t count)
  1445. {
  1446. struct netiucv_priv *priv = dev->driver_data;
  1447. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1448. priv->conn->prof.tx_pending = 0;
  1449. return count;
  1450. }
  1451. static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
  1452. static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
  1453. char *buf)
  1454. {
  1455. struct netiucv_priv *priv = dev->driver_data;
  1456. IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
  1457. return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
  1458. }
  1459. static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
  1460. const char *buf, size_t count)
  1461. {
  1462. struct netiucv_priv *priv = dev->driver_data;
  1463. IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
  1464. priv->conn->prof.tx_max_pending = 0;
  1465. return count;
  1466. }
  1467. static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
/* Per-device configuration attributes (created in the device's sysfs dir). */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};
/* Statistics attributes, grouped under the "stats" sysfs subdirectory. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};
  1493. static int netiucv_add_files(struct device *dev)
  1494. {
  1495. int ret;
  1496. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1497. ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
  1498. if (ret)
  1499. return ret;
  1500. ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
  1501. if (ret)
  1502. sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
  1503. return ret;
  1504. }
  1505. static void netiucv_remove_files(struct device *dev)
  1506. {
  1507. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1508. sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
  1509. sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
  1510. }
  1511. static int netiucv_register_device(struct net_device *ndev)
  1512. {
  1513. struct netiucv_priv *priv = netdev_priv(ndev);
  1514. struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
  1515. int ret;
  1516. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1517. if (dev) {
  1518. snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
  1519. dev->bus = &iucv_bus;
  1520. dev->parent = iucv_root;
  1521. /*
  1522. * The release function could be called after the
  1523. * module has been unloaded. It's _only_ task is to
  1524. * free the struct. Therefore, we specify kfree()
  1525. * directly here. (Probably a little bit obfuscating
  1526. * but legitime ...).
  1527. */
  1528. dev->release = (void (*)(struct device *))kfree;
  1529. dev->driver = &netiucv_driver;
  1530. } else
  1531. return -ENOMEM;
  1532. ret = device_register(dev);
  1533. if (ret)
  1534. return ret;
  1535. ret = netiucv_add_files(dev);
  1536. if (ret)
  1537. goto out_unreg;
  1538. priv->dev = dev;
  1539. dev->driver_data = priv;
  1540. return 0;
  1541. out_unreg:
  1542. device_unregister(dev);
  1543. return ret;
  1544. }
  1545. static void netiucv_unregister_device(struct device *dev)
  1546. {
  1547. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1548. netiucv_remove_files(dev);
  1549. device_unregister(dev);
  1550. }
/**
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections;
 *
 * @dev:      net_device the connection belongs to
 * @username: peer z/VM user id (9 bytes incl. NUL) or NULL
 *
 * Returns the new connection or NULL on any allocation failure;
 * partially-built state is unwound via the goto cleanup chain.
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;
	/* GFP_DMA: IUCV requires the rx/tx buffers below 2G. */
	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;
	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
	/* Only a connection with a known peer may ever be started. */
	if (username) {
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}
	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}
/**
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* unlink first so nobody finds the connection while it dies */
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		/* sever and free the IUCV path if one is still attached */
		iucv_path_sever(conn->path, iucvMagic);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}
  1620. /**
  1621. * Release everything of a net device.
  1622. */
  1623. static void netiucv_free_netdevice(struct net_device *dev)
  1624. {
  1625. struct netiucv_priv *privptr = netdev_priv(dev);
  1626. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1627. if (!dev)
  1628. return;
  1629. if (privptr) {
  1630. if (privptr->conn)
  1631. netiucv_remove_connection(privptr->conn);
  1632. if (privptr->fsm)
  1633. kfree_fsm(privptr->fsm);
  1634. privptr->conn = NULL; privptr->fsm = NULL;
  1635. /* privptr gets freed by free_netdev() */
  1636. }
  1637. free_netdev(dev);
  1638. }
  1639. /**
  1640. * Initialize a net device. (Called from kernel in alloc_netdev())
  1641. */
  1642. static void netiucv_setup_netdevice(struct net_device *dev)
  1643. {
  1644. dev->mtu = NETIUCV_MTU_DEFAULT;
  1645. dev->hard_start_xmit = netiucv_tx;
  1646. dev->open = netiucv_open;
  1647. dev->stop = netiucv_close;
  1648. dev->get_stats = netiucv_stats;
  1649. dev->change_mtu = netiucv_change_mtu;
  1650. dev->destructor = netiucv_free_netdevice;
  1651. dev->hard_header_len = NETIUCV_HDRLEN;
  1652. dev->addr_len = 0;
  1653. dev->type = ARPHRD_SLIP;
  1654. dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
  1655. dev->flags = IFF_POINTOPOINT | IFF_NOARP;
  1656. }
  1657. /**
  1658. * Allocate and initialize everything of a net device.
  1659. */
  1660. static struct net_device *netiucv_init_netdevice(char *username)
  1661. {
  1662. struct netiucv_priv *privptr;
  1663. struct net_device *dev;
  1664. dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
  1665. netiucv_setup_netdevice);
  1666. if (!dev)
  1667. return NULL;
  1668. if (dev_alloc_name(dev, dev->name) < 0)
  1669. goto out_netdev;
  1670. privptr = netdev_priv(dev);
  1671. privptr->fsm = init_fsm("netiucvdev", dev_state_names,
  1672. dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
  1673. dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
  1674. if (!privptr->fsm)
  1675. goto out_netdev;
  1676. privptr->conn = netiucv_new_connection(dev, username);
  1677. if (!privptr->conn) {
  1678. IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
  1679. goto out_fsm;
  1680. }
  1681. fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
  1682. return dev;
  1683. out_fsm:
  1684. kfree_fsm(privptr->fsm);
  1685. out_netdev:
  1686. free_netdev(dev);
  1687. return NULL;
  1688. }
/**
 * conn_write - store function of the driver's "connection" attribute.
 *
 * Parses an up-to-8-character IUCV peer user id from @buf (upcased,
 * blank padded), then allocates, registers and brings up a new
 * netiucv interface for that peer.
 *
 * Returns @count on success or a negative errno on failure.
 */
static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	const char *p;
	char username[9];
	int i, rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* at most 8 id characters plus an optional trailing newline */
	if (count>9) {
		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	/* copy and upcase; only alphanumerics and '$' are accepted */
	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		PRINT_WARN("netiucv: Invalid character in username!\n");
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %c\n", *p);
		return -EINVAL;
	}
	/* blank-pad the id to the fixed 8-character width */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	/* refuse a second connection to the same peer */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9)) {
			read_unlock_bh(&iucv_connection_rwlock);
			PRINT_WARN("netiucv: Connection to %s already "
				   "exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username);
	if (!dev) {
		PRINT_WARN("netiucv: Could not allocate network device "
			   "structure for user '%s'\n",
			   netiucv_printname(username));
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2,
			       "ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);
	rc = register_netdev(dev);
	if (rc)
		goto out_unreg;
	PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
	IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
	netiucv_free_netdevice(dev);
	return rc;
}
/* write-only driver attribute "connection", handled by conn_write() */
static DRIVER_ATTR(connection, 0200, NULL, conn_write);
  1761. static ssize_t remove_write (struct device_driver *drv,
  1762. const char *buf, size_t count)
  1763. {
  1764. struct iucv_connection *cp;
  1765. struct net_device *ndev;
  1766. struct netiucv_priv *priv;
  1767. struct device *dev;
  1768. char name[IFNAMSIZ];
  1769. const char *p;
  1770. int i;
  1771. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1772. if (count >= IFNAMSIZ)
  1773. count = IFNAMSIZ - 1;;
  1774. for (i = 0, p = buf; i < count && *p; i++, p++) {
  1775. if (*p == '\n' || *p == ' ')
  1776. /* trailing lf, grr */
  1777. break;
  1778. name[i] = *p;
  1779. }
  1780. name[i] = '\0';
  1781. read_lock_bh(&iucv_connection_rwlock);
  1782. list_for_each_entry(cp, &iucv_connection_list, list) {
  1783. ndev = cp->netdev;
  1784. priv = netdev_priv(ndev);
  1785. dev = priv->dev;
  1786. if (strncmp(name, ndev->name, count))
  1787. continue;
  1788. read_unlock_bh(&iucv_connection_rwlock);
  1789. if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
  1790. PRINT_WARN("netiucv: net device %s active with peer "
  1791. "%s\n", ndev->name, priv->conn->userid);
  1792. PRINT_WARN("netiucv: %s cannot be removed\n",
  1793. ndev->name);
  1794. IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
  1795. return -EBUSY;
  1796. }
  1797. unregister_netdev(ndev);
  1798. netiucv_unregister_device(dev);
  1799. return count;
  1800. }
  1801. read_unlock_bh(&iucv_connection_rwlock);
  1802. PRINT_WARN("netiucv: net device %s unknown\n", name);
  1803. IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
  1804. return -EINVAL;
  1805. }
/* write-only driver attribute "remove", handled by remove_write() */
static DRIVER_ATTR(remove, 0200, NULL, remove_write);
/* driver-level sysfs attributes: "connection" (add) and "remove" */
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};
/* Print the driver load banner. */
static void netiucv_banner(void)
{
	PRINT_INFO("NETIUCV driver initialized\n");
}
  1819. static void __exit netiucv_exit(void)
  1820. {
  1821. struct iucv_connection *cp;
  1822. struct net_device *ndev;
  1823. struct netiucv_priv *priv;
  1824. struct device *dev;
  1825. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1826. while (!list_empty(&iucv_connection_list)) {
  1827. cp = list_entry(iucv_connection_list.next,
  1828. struct iucv_connection, list);
  1829. ndev = cp->netdev;
  1830. priv = netdev_priv(ndev);
  1831. dev = priv->dev;
  1832. unregister_netdev(ndev);
  1833. netiucv_unregister_device(dev);
  1834. }
  1835. sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
  1836. driver_unregister(&netiucv_driver);
  1837. iucv_unregister(&netiucv_handler, 1);
  1838. iucv_unregister_dbf_views();
  1839. PRINT_INFO("NETIUCV driver unloaded\n");
  1840. return;
  1841. }
  1842. static int __init netiucv_init(void)
  1843. {
  1844. int rc;
  1845. rc = iucv_register_dbf_views();
  1846. if (rc)
  1847. goto out;
  1848. rc = iucv_register(&netiucv_handler, 1);
  1849. if (rc)
  1850. goto out_dbf;
  1851. IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
  1852. rc = driver_register(&netiucv_driver);
  1853. if (rc) {
  1854. PRINT_ERR("NETIUCV: failed to register driver.\n");
  1855. IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
  1856. goto out_iucv;
  1857. }
  1858. rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
  1859. if (rc) {
  1860. PRINT_ERR("NETIUCV: failed to add driver attributes.\n");
  1861. IUCV_DBF_TEXT_(setup, 2,
  1862. "ret %d - netiucv_drv_attr_group\n", rc);
  1863. goto out_driver;
  1864. }
  1865. netiucv_banner();
  1866. return rc;
  1867. out_driver:
  1868. driver_unregister(&netiucv_driver);
  1869. out_iucv:
  1870. iucv_unregister(&netiucv_handler, 1);
  1871. out_dbf:
  1872. iucv_unregister_dbf_views();
  1873. out:
  1874. return rc;
  1875. }
/* module entry/exit points and license */
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");