ctcmain.c

  1. /*
  2. * CTC / ESCON network driver
  3. *
  4. * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
  5. * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
  6. * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
  7. * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  8. * Peter Tiedemann (ptiedem@de.ibm.com)
  9. * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
  10. *
  11. * Documentation used:
  12. * - Principles of Operation (IBM doc#: SA22-7201-06)
  13. * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
  14. * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
  15. * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
  16. * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
  17. *
  18. * and the source of the original CTC driver by:
  19. * Dieter Wellerdiek (wel@de.ibm.com)
  20. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  21. * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  22. * Jochen Röhrig (roehrig@de.ibm.com)
  23. *
  24. * This program is free software; you can redistribute it and/or modify
  25. * it under the terms of the GNU General Public License as published by
  26. * the Free Software Foundation; either version 2, or (at your option)
  27. * any later version.
  28. *
  29. * This program is distributed in the hope that it will be useful,
  30. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  31. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  32. * GNU General Public License for more details.
  33. *
  34. * You should have received a copy of the GNU General Public License
  35. * along with this program; if not, write to the Free Software
  36. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  37. *
  38. */
  39. #undef DEBUG
  40. #include <linux/module.h>
  41. #include <linux/init.h>
  42. #include <linux/kernel.h>
  43. #include <linux/slab.h>
  44. #include <linux/errno.h>
  45. #include <linux/types.h>
  46. #include <linux/interrupt.h>
  47. #include <linux/timer.h>
  48. #include <linux/sched.h>
  49. #include <linux/bitops.h>
  50. #include <linux/signal.h>
  51. #include <linux/string.h>
  52. #include <linux/ip.h>
  53. #include <linux/if_arp.h>
  54. #include <linux/tcp.h>
  55. #include <linux/skbuff.h>
  56. #include <linux/ctype.h>
  57. #include <net/dst.h>
  58. #include <asm/io.h>
  59. #include <asm/ccwdev.h>
  60. #include <asm/ccwgroup.h>
  61. #include <asm/uaccess.h>
  62. #include <asm/idals.h>
  63. #include "fsm.h"
  64. #include "cu3088.h"
  65. #include "ctcdbug.h"
  66. #include "ctcmain.h"
  67. MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
  68. MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
  69. MODULE_LICENSE("GPL");
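/*
 * Rough structure, as reflected in the statemachines below: every interface
 * owns a read channel and a write channel, each driven by its own channel
 * statemachine (ch_states / ch_events), while a per-interface statemachine
 * (dev_states / dev_events) tracks whether RX and TX are up and decides when
 * the network device as a whole is considered running.  Channel actions
 * report back through DEV_EVENT_RXUP/TXUP and DEV_EVENT_RXDOWN/TXDOWN.
 */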
  70. /**
  71. * States of the interface statemachine.
  72. */
  73. enum dev_states {
  74. DEV_STATE_STOPPED,
  75. DEV_STATE_STARTWAIT_RXTX,
  76. DEV_STATE_STARTWAIT_RX,
  77. DEV_STATE_STARTWAIT_TX,
  78. DEV_STATE_STOPWAIT_RXTX,
  79. DEV_STATE_STOPWAIT_RX,
  80. DEV_STATE_STOPWAIT_TX,
  81. DEV_STATE_RUNNING,
  82. /**
  83. * MUST always be the last element!!
  84. */
  85. CTC_NR_DEV_STATES
  86. };
  87. static const char *dev_state_names[] = {
  88. "Stopped",
  89. "StartWait RXTX",
  90. "StartWait RX",
  91. "StartWait TX",
  92. "StopWait RXTX",
  93. "StopWait RX",
  94. "StopWait TX",
  95. "Running",
  96. };
  97. /**
  98. * Events of the interface statemachine.
  99. */
  100. enum dev_events {
  101. DEV_EVENT_START,
  102. DEV_EVENT_STOP,
  103. DEV_EVENT_RXUP,
  104. DEV_EVENT_TXUP,
  105. DEV_EVENT_RXDOWN,
  106. DEV_EVENT_TXDOWN,
  107. DEV_EVENT_RESTART,
  108. /**
  109. * MUST always be the last element!!
  110. */
  111. CTC_NR_DEV_EVENTS
  112. };
  113. static const char *dev_event_names[] = {
  114. "Start",
  115. "Stop",
  116. "RX up",
  117. "TX up",
  118. "RX down",
  119. "TX down",
  120. "Restart",
  121. };
  122. /**
  123. * Events of the channel statemachine
  124. */
  125. enum ch_events {
  126. /**
  127. * Events, representing return code of
  128. * I/O operations (ccw_device_start, ccw_device_halt et al.)
  129. */
  130. CH_EVENT_IO_SUCCESS,
  131. CH_EVENT_IO_EBUSY,
  132. CH_EVENT_IO_ENODEV,
  133. CH_EVENT_IO_EIO,
  134. CH_EVENT_IO_UNKNOWN,
  135. CH_EVENT_ATTNBUSY,
  136. CH_EVENT_ATTN,
  137. CH_EVENT_BUSY,
  138. /**
  139. * Events, representing unit-check
  140. */
  141. CH_EVENT_UC_RCRESET,
  142. CH_EVENT_UC_RSRESET,
  143. CH_EVENT_UC_TXTIMEOUT,
  144. CH_EVENT_UC_TXPARITY,
  145. CH_EVENT_UC_HWFAIL,
  146. CH_EVENT_UC_RXPARITY,
  147. CH_EVENT_UC_ZERO,
  148. CH_EVENT_UC_UNKNOWN,
  149. /**
  150. * Events, representing subchannel-check
  151. */
  152. CH_EVENT_SC_UNKNOWN,
  153. /**
  154. * Events, representing machine checks
  155. */
  156. CH_EVENT_MC_FAIL,
  157. CH_EVENT_MC_GOOD,
  158. /**
  159. * Event, representing normal IRQ
  160. */
  161. CH_EVENT_IRQ,
  162. CH_EVENT_FINSTAT,
  163. /**
  164. * Event, representing timer expiry.
  165. */
  166. CH_EVENT_TIMER,
  167. /**
  168. * Events, representing commands from upper levels.
  169. */
  170. CH_EVENT_START,
  171. CH_EVENT_STOP,
  172. /**
  173. * MUST always be the last element!!
  174. */
  175. NR_CH_EVENTS,
  176. };
  177. /**
  178. * States of the channel statemachine.
  179. */
  180. enum ch_states {
  181. /**
  182. * Channel not assigned to any device,
  183. * initial state, direction invalid
  184. */
  185. CH_STATE_IDLE,
  186. /**
  187. * Channel assigned but not operating
  188. */
  189. CH_STATE_STOPPED,
  190. CH_STATE_STARTWAIT,
  191. CH_STATE_STARTRETRY,
  192. CH_STATE_SETUPWAIT,
  193. CH_STATE_RXINIT,
  194. CH_STATE_TXINIT,
  195. CH_STATE_RX,
  196. CH_STATE_TX,
  197. CH_STATE_RXIDLE,
  198. CH_STATE_TXIDLE,
  199. CH_STATE_RXERR,
  200. CH_STATE_TXERR,
  201. CH_STATE_TERM,
  202. CH_STATE_DTERM,
  203. CH_STATE_NOTOP,
  204. /**
  205. * MUST always be the last element!!
  206. */
  207. NR_CH_STATES,
  208. };
  209. static int loglevel = CTC_LOGLEVEL_DEFAULT;
  210. /**
  211. * Linked list of all detected channels.
  212. */
  213. static struct channel *channels = NULL;
  214. /**
  215. * Print Banner.
  216. */
  217. static void
  218. print_banner(void)
  219. {
  220. static int printed = 0;
  221. if (printed)
  222. return;
  223. printk(KERN_INFO "CTC driver initialized\n");
  224. printed = 1;
  225. }
  226. /**
  227. * Return type of a detected device.
  228. */
  229. static enum channel_types
  230. get_channel_type(struct ccw_device_id *id)
  231. {
  232. enum channel_types type = (enum channel_types) id->driver_info;
  233. if (type == channel_type_ficon)
  234. type = channel_type_escon;
  235. return type;
  236. }
  237. static const char *ch_event_names[] = {
  238. "ccw_device success",
  239. "ccw_device busy",
  240. "ccw_device enodev",
  241. "ccw_device ioerr",
  242. "ccw_device unknown",
  243. "Status ATTN & BUSY",
  244. "Status ATTN",
  245. "Status BUSY",
  246. "Unit check remote reset",
  247. "Unit check remote system reset",
  248. "Unit check TX timeout",
  249. "Unit check TX parity",
  250. "Unit check Hardware failure",
  251. "Unit check RX parity",
  252. "Unit check ZERO",
  253. "Unit check Unknown",
  254. "SubChannel check Unknown",
  255. "Machine check failure",
  256. "Machine check operational",
  257. "IRQ normal",
  258. "IRQ final",
  259. "Timer",
  260. "Start",
  261. "Stop",
  262. };
  263. static const char *ch_state_names[] = {
  264. "Idle",
  265. "Stopped",
  266. "StartWait",
  267. "StartRetry",
  268. "SetupWait",
  269. "RX init",
  270. "TX init",
  271. "RX",
  272. "TX",
  273. "RX idle",
  274. "TX idle",
  275. "RX error",
  276. "TX error",
  277. "Terminating",
  278. "Restarting",
  279. "Not operational",
  280. };
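/*
 * The *_state_names and *_event_names arrays above are indexed directly by
 * their enums (e.g. ch_state_names[CH_STATE_RXIDLE]), so a new state or
 * event has to be added to the enum and to its name table at the same
 * position; the CTC_NR_* / NR_CH_* sentinels only bound the enum side.
 */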
  281. #ifdef DEBUG
  282. /**
  283. * Dump header and first 16 bytes of an sk_buff for debugging purposes.
  284. *
  285. * @param skb The sk_buff to dump.
  286. * @param offset Offset relative to skb->data at which to start the dump.
  287. */
  288. static void
  289. ctc_dump_skb(struct sk_buff *skb, int offset)
  290. {
  291. unsigned char *p = skb->data;
  292. __u16 bl;
  293. struct ll_header *header;
  294. int i;
  295. if (!(loglevel & CTC_LOGLEVEL_DEBUG))
  296. return;
  297. p += offset;
  298. bl = *((__u16 *) p);
  299. p += 2;
  300. header = (struct ll_header *) p;
  301. p -= 2;
  302. printk(KERN_DEBUG "dump:\n");
  303. printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
  304. printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
  305. header->length);
  306. printk(KERN_DEBUG "h->type=%04x\n", header->type);
  307. printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
  308. if (bl > 16)
  309. bl = 16;
  310. printk(KERN_DEBUG "data: ");
  311. for (i = 0; i < bl; i++)
  312. printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
  313. printk("\n");
  314. }
  315. #else
  316. static inline void
  317. ctc_dump_skb(struct sk_buff *skb, int offset)
  318. {
  319. }
  320. #endif
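/*
 * Block layout as implied by ctc_unpack_skb() below (a reading of the code,
 * not a quote from the hardware documentation): a received block starts with
 * a 2-byte block length that includes itself, followed by one or more
 * packets, each prefixed by a struct ll_header (length, type, unused):
 *
 *   +--------+-----------+---------+-----------+---------+--
 *   | blklen | ll_header | payload | ll_header | payload | ...
 *   +--------+-----------+---------+-----------+---------+--
 *
 * header->length covers the header itself (LL_HEADER_LENGTH) plus the
 * payload; ctc_unpack_skb() strips the header and copies each payload into
 * a freshly allocated skb before handing it to netif_rx_ni().
 */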
  321. /**
  322. * Unpack a just received skb and hand it over to
  323. * upper layers.
  324. *
  325. * @param ch The channel where this skb has been received.
  326. * @param pskb The received skb.
  327. */
  328. static __inline__ void
  329. ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
  330. {
  331. struct net_device *dev = ch->netdev;
  332. struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
  333. __u16 len = *((__u16 *) pskb->data);
  334. DBF_TEXT(trace, 4, __FUNCTION__);
  335. skb_put(pskb, 2 + LL_HEADER_LENGTH);
  336. skb_pull(pskb, 2);
  337. pskb->dev = dev;
  338. pskb->ip_summed = CHECKSUM_UNNECESSARY;
  339. while (len > 0) {
  340. struct sk_buff *skb;
  341. struct ll_header *header = (struct ll_header *) pskb->data;
  342. skb_pull(pskb, LL_HEADER_LENGTH);
  343. if ((ch->protocol == CTC_PROTO_S390) &&
  344. (header->type != ETH_P_IP)) {
  345. #ifndef DEBUG
  346. if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
  347. #endif
  348. /**
  349. * Check the packet type only if we stick strictly
  350. * to the S/390 / OS/390 protocol, which only
  351. * supports IP. Otherwise allow any packet
  352. * type.
  353. */
  354. ctc_pr_warn(
  355. "%s Illegal packet type 0x%04x received, dropping\n",
  356. dev->name, header->type);
  357. ch->logflags |= LOG_FLAG_ILLEGALPKT;
  358. #ifndef DEBUG
  359. }
  360. #endif
  361. #ifdef DEBUG
  362. ctc_dump_skb(pskb, -6);
  363. #endif
  364. privptr->stats.rx_dropped++;
  365. privptr->stats.rx_frame_errors++;
  366. return;
  367. }
  368. pskb->protocol = ntohs(header->type);
  369. if (header->length <= LL_HEADER_LENGTH) {
  370. #ifndef DEBUG
  371. if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
  372. #endif
  373. ctc_pr_warn(
  374. "%s Illegal packet size %d "
  375. "received (MTU=%d blocklen=%d), "
  376. "dropping\n", dev->name, header->length,
  377. dev->mtu, len);
  378. ch->logflags |= LOG_FLAG_ILLEGALSIZE;
  379. #ifndef DEBUG
  380. }
  381. #endif
  382. #ifdef DEBUG
  383. ctc_dump_skb(pskb, -6);
  384. #endif
  385. privptr->stats.rx_dropped++;
  386. privptr->stats.rx_length_errors++;
  387. return;
  388. }
  389. header->length -= LL_HEADER_LENGTH;
  390. len -= LL_HEADER_LENGTH;
  391. if ((header->length > skb_tailroom(pskb)) ||
  392. (header->length > len)) {
  393. #ifndef DEBUG
  394. if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
  395. #endif
  396. ctc_pr_warn(
  397. "%s Illegal packet size %d "
  398. "(beyond the end of received data), "
  399. "dropping\n", dev->name, header->length);
  400. ch->logflags |= LOG_FLAG_OVERRUN;
  401. #ifndef DEBUG
  402. }
  403. #endif
  404. #ifdef DEBUG
  405. ctc_dump_skb(pskb, -6);
  406. #endif
  407. privptr->stats.rx_dropped++;
  408. privptr->stats.rx_length_errors++;
  409. return;
  410. }
  411. skb_put(pskb, header->length);
  412. pskb->mac.raw = pskb->data;
  413. len -= header->length;
  414. skb = dev_alloc_skb(pskb->len);
  415. if (!skb) {
  416. #ifndef DEBUG
  417. if (!(ch->logflags & LOG_FLAG_NOMEM)) {
  418. #endif
  419. ctc_pr_warn(
  420. "%s Out of memory in ctc_unpack_skb\n",
  421. dev->name);
  422. ch->logflags |= LOG_FLAG_NOMEM;
  423. #ifndef DEBUG
  424. }
  425. #endif
  426. privptr->stats.rx_dropped++;
  427. return;
  428. }
  429. memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
  430. skb->mac.raw = skb->data;
  431. skb->dev = pskb->dev;
  432. skb->protocol = pskb->protocol;
  433. pskb->ip_summed = CHECKSUM_UNNECESSARY;
  434. netif_rx_ni(skb);
  435. /**
  436. * Successful rx; reset logflags
  437. */
  438. ch->logflags = 0;
  439. dev->last_rx = jiffies;
  440. privptr->stats.rx_packets++;
  441. privptr->stats.rx_bytes += skb->len;
  442. if (len > 0) {
  443. skb_pull(pskb, header->length);
  444. if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
  445. #ifndef DEBUG
  446. if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
  447. #endif
  448. ctc_pr_warn(
  449. "%s Overrun in ctc_unpack_skb\n",
  450. dev->name);
  451. ch->logflags |= LOG_FLAG_OVERRUN;
  452. #ifndef DEBUG
  453. }
  454. #endif
  455. return;
  456. }
  457. skb_put(pskb, LL_HEADER_LENGTH);
  458. }
  459. }
  460. }
  461. /**
  462. * Check return code of a preceding ccw_device call, halt_IO etc...
  463. *
  464. * @param ch The channel, the error belongs to.
  465. * @param return_code The error code to inspect.
  466. */
  467. static void inline
  468. ccw_check_return_code(struct channel *ch, int return_code, char *msg)
  469. {
  470. DBF_TEXT(trace, 5, __FUNCTION__);
  471. switch (return_code) {
  472. case 0:
  473. fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
  474. break;
  475. case -EBUSY:
  476. ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
  477. fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
  478. break;
  479. case -ENODEV:
  480. ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
  481. ch->id, msg);
  482. fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
  483. break;
  484. case -EIO:
  485. ctc_pr_emerg("%s (%s): Status pending... \n",
  486. ch->id, msg);
  487. fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
  488. break;
  489. default:
  490. ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
  491. ch->id, msg, return_code);
  492. fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
  493. }
  494. }
  495. /**
  496. * Check sense of a unit check.
  497. *
  498. * @param ch The channel, the sense code belongs to.
  499. * @param sense The sense code to inspect.
  500. */
  501. static void inline
  502. ccw_unit_check(struct channel *ch, unsigned char sense)
  503. {
  504. DBF_TEXT(trace, 5, __FUNCTION__);
  505. if (sense & SNS0_INTERVENTION_REQ) {
  506. if (sense & 0x01) {
  507. ctc_pr_debug("%s: Interface disc. or Sel. reset "
  508. "(remote)\n", ch->id);
  509. fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
  510. } else {
  511. ctc_pr_debug("%s: System reset (remote)\n", ch->id);
  512. fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
  513. }
  514. } else if (sense & SNS0_EQUIPMENT_CHECK) {
  515. if (sense & SNS0_BUS_OUT_CHECK) {
  516. ctc_pr_warn("%s: Hardware malfunction (remote)\n",
  517. ch->id);
  518. fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
  519. } else {
  520. ctc_pr_warn("%s: Read-data parity error (remote)\n",
  521. ch->id);
  522. fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
  523. }
  524. } else if (sense & SNS0_BUS_OUT_CHECK) {
  525. if (sense & 0x04) {
  526. ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
  527. fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
  528. } else {
  529. ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
  530. fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
  531. }
  532. } else if (sense & SNS0_CMD_REJECT) {
  533. ctc_pr_warn("%s: Command reject\n", ch->id);
  534. } else if (sense == 0) {
  535. ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
  536. fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
  537. } else {
  538. ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
  539. ch->id, sense);
  540. fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
  541. }
  542. }
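/*
 * Mapping performed above, summarized (derived from the code, not from the
 * sense-byte definitions):
 *
 *   SNS0_INTERVENTION_REQ, bit 0x01 set  -> CH_EVENT_UC_RCRESET
 *   SNS0_INTERVENTION_REQ otherwise      -> CH_EVENT_UC_RSRESET
 *   SNS0_EQUIPMENT_CHECK + BUS_OUT_CHECK -> CH_EVENT_UC_HWFAIL
 *   SNS0_EQUIPMENT_CHECK otherwise       -> CH_EVENT_UC_RXPARITY
 *   SNS0_BUS_OUT_CHECK, bit 0x04 set     -> CH_EVENT_UC_TXTIMEOUT
 *   SNS0_BUS_OUT_CHECK otherwise         -> CH_EVENT_UC_TXPARITY
 *   SNS0_CMD_REJECT                      -> logged only, no event
 *   sense == 0                           -> CH_EVENT_UC_ZERO
 *   anything else                        -> CH_EVENT_UC_UNKNOWN
 */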
  543. static void
  544. ctc_purge_skb_queue(struct sk_buff_head *q)
  545. {
  546. struct sk_buff *skb;
  547. DBF_TEXT(trace, 5, __FUNCTION__);
  548. while ((skb = skb_dequeue(q))) {
  549. atomic_dec(&skb->users);
  550. dev_kfree_skb_irq(skb);
  551. }
  552. }
  553. static __inline__ int
  554. ctc_checkalloc_buffer(struct channel *ch, int warn)
  555. {
  556. DBF_TEXT(trace, 5, __FUNCTION__);
  557. if ((ch->trans_skb == NULL) ||
  558. (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
  559. if (ch->trans_skb != NULL)
  560. dev_kfree_skb(ch->trans_skb);
  561. clear_normalized_cda(&ch->ccw[1]);
  562. ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
  563. GFP_ATOMIC | GFP_DMA);
  564. if (ch->trans_skb == NULL) {
  565. if (warn)
  566. ctc_pr_warn(
  567. "%s: Couldn't alloc %s trans_skb\n",
  568. ch->id,
  569. (CHANNEL_DIRECTION(ch->flags) == READ) ?
  570. "RX" : "TX");
  571. return -ENOMEM;
  572. }
  573. ch->ccw[1].count = ch->max_bufsize;
  574. if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
  575. dev_kfree_skb(ch->trans_skb);
  576. ch->trans_skb = NULL;
  577. if (warn)
  578. ctc_pr_warn(
  579. "%s: set_normalized_cda for %s "
  580. "trans_skb failed, dropping packets\n",
  581. ch->id,
  582. (CHANNEL_DIRECTION(ch->flags) == READ) ?
  583. "RX" : "TX");
  584. return -ENOMEM;
  585. }
  586. ch->ccw[1].count = 0;
  587. ch->trans_skb_data = ch->trans_skb->data;
  588. ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
  589. }
  590. return 0;
  591. }
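/*
 * A note on the allocation above: trans_skb is requested with GFP_DMA,
 * presumably because CCW data addresses are limited to 31 bits, and
 * set_normalized_cda() (from idals.h) is expected to build an IDA list when
 * the buffer cannot be addressed directly -- both assumptions about the
 * s390 channel I/O helpers rather than something spelled out in this file.
 * On failure the channel is left without a buffer and the caller decides,
 * via the 'warn' argument, whether to log; ch_action_start() passes 0 and
 * prints its own notice instead.
 */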
  592. /**
  593. * Dummy NOP action for statemachines
  594. */
  595. static void
  596. fsm_action_nop(fsm_instance * fi, int event, void *arg)
  597. {
  598. }
  599. /**
  600. * Actions for channel - statemachines.
  601. *****************************************************************************/
  602. /**
  603. * Normal data has been sent. Free the corresponding
  604. * skb (it's in io_queue), reset dev->tbusy and
  605. * revert to idle state.
  606. *
  607. * @param fi An instance of a channel statemachine.
  608. * @param event The event, just happened.
  609. * @param arg Generic pointer, casted from channel * upon call.
  610. */
  611. static void
  612. ch_action_txdone(fsm_instance * fi, int event, void *arg)
  613. {
  614. struct channel *ch = (struct channel *) arg;
  615. struct net_device *dev = ch->netdev;
  616. struct ctc_priv *privptr = dev->priv;
  617. struct sk_buff *skb;
  618. int first = 1;
  619. int i;
  620. unsigned long duration;
  621. struct timespec done_stamp = xtime;
  622. DBF_TEXT(trace, 4, __FUNCTION__);
  623. duration =
  624. (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
  625. (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
  626. if (duration > ch->prof.tx_time)
  627. ch->prof.tx_time = duration;
  628. if (ch->irb->scsw.count != 0)
  629. ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
  630. dev->name, ch->irb->scsw.count);
  631. fsm_deltimer(&ch->timer);
  632. while ((skb = skb_dequeue(&ch->io_queue))) {
  633. privptr->stats.tx_packets++;
  634. privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
  635. if (first) {
  636. privptr->stats.tx_bytes += 2;
  637. first = 0;
  638. }
  639. atomic_dec(&skb->users);
  640. dev_kfree_skb_irq(skb);
  641. }
  642. spin_lock(&ch->collect_lock);
  643. clear_normalized_cda(&ch->ccw[4]);
  644. if (ch->collect_len > 0) {
  645. int rc;
  646. if (ctc_checkalloc_buffer(ch, 1)) {
  647. spin_unlock(&ch->collect_lock);
  648. return;
  649. }
  650. ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
  651. ch->trans_skb->len = 0;
  652. if (ch->prof.maxmulti < (ch->collect_len + 2))
  653. ch->prof.maxmulti = ch->collect_len + 2;
  654. if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
  655. ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
  656. *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
  657. i = 0;
  658. while ((skb = skb_dequeue(&ch->collect_queue))) {
  659. memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
  660. skb->len);
  661. privptr->stats.tx_packets++;
  662. privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
  663. atomic_dec(&skb->users);
  664. dev_kfree_skb_irq(skb);
  665. i++;
  666. }
  667. ch->collect_len = 0;
  668. spin_unlock(&ch->collect_lock);
  669. ch->ccw[1].count = ch->trans_skb->len;
  670. fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
  671. ch->prof.send_stamp = xtime;
  672. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  673. (unsigned long) ch, 0xff, 0);
  674. ch->prof.doios_multi++;
  675. if (rc != 0) {
  676. privptr->stats.tx_dropped += i;
  677. privptr->stats.tx_errors += i;
  678. fsm_deltimer(&ch->timer);
  679. ccw_check_return_code(ch, rc, "chained TX");
  680. }
  681. } else {
  682. spin_unlock(&ch->collect_lock);
  683. fsm_newstate(fi, CH_STATE_TXIDLE);
  684. }
  685. ctc_clear_busy(dev);
  686. }
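/*
 * While a write is outstanding, the transmit path (not shown in this
 * excerpt) parks additional skbs on collect_queue under collect_lock and
 * accounts their total in collect_len.  The completion handler above drains
 * that backlog in one go: all queued packets are copied back-to-back into
 * trans_skb behind a single 2-byte block length (collect_len + 2) and sent
 * with one chained ccw_device_start(), counted in prof.doios_multi.
 */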
  687. /**
  688. * Initial data is sent.
  689. * Notify device statemachine that we are up and
  690. * running.
  691. *
  692. * @param fi An instance of a channel statemachine.
  693. * @param event The event, just happened.
  694. * @param arg Generic pointer, casted from channel * upon call.
  695. */
  696. static void
  697. ch_action_txidle(fsm_instance * fi, int event, void *arg)
  698. {
  699. struct channel *ch = (struct channel *) arg;
  700. DBF_TEXT(trace, 4, __FUNCTION__);
  701. fsm_deltimer(&ch->timer);
  702. fsm_newstate(fi, CH_STATE_TXIDLE);
  703. fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
  704. ch->netdev);
  705. }
  706. /**
  707. * Got normal data, check it for sanity, queue it up, allocate a new buffer,
  708. * trigger the bottom half, and initiate the next read.
  709. *
  710. * @param fi An instance of a channel statemachine.
  711. * @param event The event, just happened.
  712. * @param arg Generic pointer, casted from channel * upon call.
  713. */
  714. static void
  715. ch_action_rx(fsm_instance * fi, int event, void *arg)
  716. {
  717. struct channel *ch = (struct channel *) arg;
  718. struct net_device *dev = ch->netdev;
  719. struct ctc_priv *privptr = dev->priv;
  720. int len = ch->max_bufsize - ch->irb->scsw.count;
  721. struct sk_buff *skb = ch->trans_skb;
  722. __u16 block_len = *((__u16 *) skb->data);
  723. int check_len;
  724. int rc;
  725. DBF_TEXT(trace, 4, __FUNCTION__);
  726. fsm_deltimer(&ch->timer);
  727. if (len < 8) {
  728. ctc_pr_debug("%s: got packet with length %d < 8\n",
  729. dev->name, len);
  730. privptr->stats.rx_dropped++;
  731. privptr->stats.rx_length_errors++;
  732. goto again;
  733. }
  734. if (len > ch->max_bufsize) {
  735. ctc_pr_debug("%s: got packet with length %d > %d\n",
  736. dev->name, len, ch->max_bufsize);
  737. privptr->stats.rx_dropped++;
  738. privptr->stats.rx_length_errors++;
  739. goto again;
  740. }
  741. /**
  742. * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
  743. */
  744. switch (ch->protocol) {
  745. case CTC_PROTO_S390:
  746. case CTC_PROTO_OS390:
  747. check_len = block_len + 2;
  748. break;
  749. default:
  750. check_len = block_len;
  751. break;
  752. }
  753. if ((len < block_len) || (len > check_len)) {
  754. ctc_pr_debug("%s: got block length %d != rx length %d\n",
  755. dev->name, block_len, len);
  756. #ifdef DEBUG
  757. ctc_dump_skb(skb, 0);
  758. #endif
  759. *((__u16 *) skb->data) = len;
  760. privptr->stats.rx_dropped++;
  761. privptr->stats.rx_length_errors++;
  762. goto again;
  763. }
  764. block_len -= 2;
  765. if (block_len > 0) {
  766. *((__u16 *) skb->data) = block_len;
  767. ctc_unpack_skb(ch, skb);
  768. }
  769. again:
  770. skb->data = skb->tail = ch->trans_skb_data;
  771. skb->len = 0;
  772. if (ctc_checkalloc_buffer(ch, 1))
  773. return;
  774. ch->ccw[1].count = ch->max_bufsize;
  775. rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
  776. if (rc != 0)
  777. ccw_check_return_code(ch, rc, "normal RX");
  778. }
  779. static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
  780. /**
  781. * Initialize connection by sending a __u16 of value 0.
  782. *
  783. * @param fi An instance of a channel statemachine.
  784. * @param event The event, just happened.
  785. * @param arg Generic pointer, casted from channel * upon call.
  786. */
  787. static void
  788. ch_action_firstio(fsm_instance * fi, int event, void *arg)
  789. {
  790. struct channel *ch = (struct channel *) arg;
  791. int rc;
  792. DBF_TEXT(trace, 4, __FUNCTION__);
  793. if (fsm_getstate(fi) == CH_STATE_TXIDLE)
  794. ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
  795. fsm_deltimer(&ch->timer);
  796. if (ctc_checkalloc_buffer(ch, 1))
  797. return;
  798. if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
  799. (ch->protocol == CTC_PROTO_OS390)) {
  800. /* OS/390 or z/OS */
  801. if (CHANNEL_DIRECTION(ch->flags) == READ) {
  802. *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
  803. fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
  804. CH_EVENT_TIMER, ch);
  805. ch_action_rxidle(fi, event, arg);
  806. } else {
  807. struct net_device *dev = ch->netdev;
  808. fsm_newstate(fi, CH_STATE_TXIDLE);
  809. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  810. DEV_EVENT_TXUP, dev);
  811. }
  812. return;
  813. }
  814. /**
  815. * Don't set up a timer for receiving the initial RX frame
  816. * if in compatibility mode, since VM TCP delays the initial
  817. * frame until it has some data to send.
  818. */
  819. if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
  820. (ch->protocol != CTC_PROTO_S390))
  821. fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
  822. *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
  823. ch->ccw[1].count = 2; /* Transfer only length */
  824. fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
  825. ? CH_STATE_RXINIT : CH_STATE_TXINIT);
  826. rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
  827. if (rc != 0) {
  828. fsm_deltimer(&ch->timer);
  829. fsm_newstate(fi, CH_STATE_SETUPWAIT);
  830. ccw_check_return_code(ch, rc, "init IO");
  831. }
  832. /**
  833. * In compatibility mode, since we don't set up a timer, we
  834. * also signal the RX channel up immediately. This enables us
  835. * to send packets early, which in turn usually triggers a
  836. * reply from VM TCP that brings the RX channel to its
  837. * final state.
  838. */
  839. if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
  840. (ch->protocol == CTC_PROTO_S390)) {
  841. struct net_device *dev = ch->netdev;
  842. fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
  843. dev);
  844. }
  845. }
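/*
 * The initial handshake, in short: each side transfers a 2-byte block
 * containing CTC_INITIAL_BLOCKLEN and waits for the peer to do the same
 * before RX/TX are reported up to the device statemachine.  With
 * CTC_PROTO_S390 (compatibility mode) the RX side signals DEV_EVENT_RXUP
 * immediately and without a timer, since VM TCP only answers once it has
 * data to send, as the comments above explain.
 */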
  846. /**
  847. * Got initial data, check it. If OK,
  848. * notify device statemachine that we are up and
  849. * running.
  850. *
  851. * @param fi An instance of a channel statemachine.
  852. * @param event The event, just happened.
  853. * @param arg Generic pointer, casted from channel * upon call.
  854. */
  855. static void
  856. ch_action_rxidle(fsm_instance * fi, int event, void *arg)
  857. {
  858. struct channel *ch = (struct channel *) arg;
  859. struct net_device *dev = ch->netdev;
  860. __u16 buflen;
  861. int rc;
  862. DBF_TEXT(trace, 4, __FUNCTION__);
  863. fsm_deltimer(&ch->timer);
  864. buflen = *((__u16 *) ch->trans_skb->data);
  865. #ifdef DEBUG
  866. ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
  867. #endif
  868. if (buflen >= CTC_INITIAL_BLOCKLEN) {
  869. if (ctc_checkalloc_buffer(ch, 1))
  870. return;
  871. ch->ccw[1].count = ch->max_bufsize;
  872. fsm_newstate(fi, CH_STATE_RXIDLE);
  873. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  874. (unsigned long) ch, 0xff, 0);
  875. if (rc != 0) {
  876. fsm_newstate(fi, CH_STATE_RXINIT);
  877. ccw_check_return_code(ch, rc, "initial RX");
  878. } else
  879. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  880. DEV_EVENT_RXUP, dev);
  881. } else {
  882. ctc_pr_debug("%s: Initial RX count %d not %d\n",
  883. dev->name, buflen, CTC_INITIAL_BLOCKLEN);
  884. ch_action_firstio(fi, event, arg);
  885. }
  886. }
  887. /**
  888. * Set channel into extended mode.
  889. *
  890. * @param fi An instance of a channel statemachine.
  891. * @param event The event, just happened.
  892. * @param arg Generic pointer, casted from channel * upon call.
  893. */
  894. static void
  895. ch_action_setmode(fsm_instance * fi, int event, void *arg)
  896. {
  897. struct channel *ch = (struct channel *) arg;
  898. int rc;
  899. unsigned long saveflags;
  900. DBF_TEXT(trace, 4, __FUNCTION__);
  901. fsm_deltimer(&ch->timer);
  902. fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
  903. fsm_newstate(fi, CH_STATE_SETUPWAIT);
  904. saveflags = 0; /* avoids compiler warning with
  905. spin_unlock_irqrestore */
  906. if (event == CH_EVENT_TIMER) // only for timer not yet locked
  907. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  908. rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
  909. if (event == CH_EVENT_TIMER)
  910. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  911. if (rc != 0) {
  912. fsm_deltimer(&ch->timer);
  913. fsm_newstate(fi, CH_STATE_STARTWAIT);
  914. ccw_check_return_code(ch, rc, "set Mode");
  915. } else
  916. ch->retry = 0;
  917. }
  918. /**
  919. * Setup channel.
  920. *
  921. * @param fi An instance of a channel statemachine.
  922. * @param event The event, just happened.
  923. * @param arg Generic pointer, casted from channel * upon call.
  924. */
  925. static void
  926. ch_action_start(fsm_instance * fi, int event, void *arg)
  927. {
  928. struct channel *ch = (struct channel *) arg;
  929. unsigned long saveflags;
  930. int rc;
  931. struct net_device *dev;
  932. DBF_TEXT(trace, 4, __FUNCTION__);
  933. if (ch == NULL) {
  934. ctc_pr_warn("ch_action_start ch=NULL\n");
  935. return;
  936. }
  937. if (ch->netdev == NULL) {
  938. ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
  939. return;
  940. }
  941. dev = ch->netdev;
  942. #ifdef DEBUG
  943. ctc_pr_debug("%s: %s channel start\n", dev->name,
  944. (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
  945. #endif
  946. if (ch->trans_skb != NULL) {
  947. clear_normalized_cda(&ch->ccw[1]);
  948. dev_kfree_skb(ch->trans_skb);
  949. ch->trans_skb = NULL;
  950. }
  951. if (CHANNEL_DIRECTION(ch->flags) == READ) {
  952. ch->ccw[1].cmd_code = CCW_CMD_READ;
  953. ch->ccw[1].flags = CCW_FLAG_SLI;
  954. ch->ccw[1].count = 0;
  955. } else {
  956. ch->ccw[1].cmd_code = CCW_CMD_WRITE;
  957. ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  958. ch->ccw[1].count = 0;
  959. }
  960. if (ctc_checkalloc_buffer(ch, 0)) {
  961. ctc_pr_notice(
  962. "%s: Could not allocate %s trans_skb, delaying "
  963. "allocation until first transfer\n",
  964. dev->name,
  965. (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
  966. }
  967. ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
  968. ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  969. ch->ccw[0].count = 0;
  970. ch->ccw[0].cda = 0;
  971. ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
  972. ch->ccw[2].flags = CCW_FLAG_SLI;
  973. ch->ccw[2].count = 0;
  974. ch->ccw[2].cda = 0;
  975. memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
  976. ch->ccw[4].cda = 0;
  977. ch->ccw[4].flags &= ~CCW_FLAG_IDA;
  978. fsm_newstate(fi, CH_STATE_STARTWAIT);
  979. fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
  980. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  981. rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
  982. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  983. if (rc != 0) {
  984. if (rc != -EBUSY)
  985. fsm_deltimer(&ch->timer);
  986. ccw_check_return_code(ch, rc, "initial HaltIO");
  987. }
  988. #ifdef DEBUG
  989. ctc_pr_debug("ctc: %s(): leaving\n", __func__);
  990. #endif
  991. }
  992. /**
  993. * Shutdown a channel.
  994. *
  995. * @param fi An instance of a channel statemachine.
  996. * @param event The event, just happened.
  997. * @param arg Generic pointer, casted from channel * upon call.
  998. */
  999. static void
  1000. ch_action_haltio(fsm_instance * fi, int event, void *arg)
  1001. {
  1002. struct channel *ch = (struct channel *) arg;
  1003. unsigned long saveflags;
  1004. int rc;
  1005. int oldstate;
  1006. DBF_TEXT(trace, 3, __FUNCTION__);
  1007. fsm_deltimer(&ch->timer);
  1008. fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
  1009. saveflags = 0; /* avoids compiler warning with
  1010. spin_unlock_irqrestore */
  1011. if (event == CH_EVENT_STOP) // only for STOP not yet locked
  1012. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  1013. oldstate = fsm_getstate(fi);
  1014. fsm_newstate(fi, CH_STATE_TERM);
  1015. rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
  1016. if (event == CH_EVENT_STOP)
  1017. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  1018. if (rc != 0) {
  1019. if (rc != -EBUSY) {
  1020. fsm_deltimer(&ch->timer);
  1021. fsm_newstate(fi, oldstate);
  1022. }
  1023. ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
  1024. }
  1025. }
  1026. /**
  1027. * A channel has successfully been halted.
  1028. * Clean up its queue and notify the interface statemachine.
  1029. *
  1030. * @param fi An instance of a channel statemachine.
  1031. * @param event The event, just happened.
  1032. * @param arg Generic pointer, casted from channel * upon call.
  1033. */
  1034. static void
  1035. ch_action_stopped(fsm_instance * fi, int event, void *arg)
  1036. {
  1037. struct channel *ch = (struct channel *) arg;
  1038. struct net_device *dev = ch->netdev;
  1039. DBF_TEXT(trace, 3, __FUNCTION__);
  1040. fsm_deltimer(&ch->timer);
  1041. fsm_newstate(fi, CH_STATE_STOPPED);
  1042. if (ch->trans_skb != NULL) {
  1043. clear_normalized_cda(&ch->ccw[1]);
  1044. dev_kfree_skb(ch->trans_skb);
  1045. ch->trans_skb = NULL;
  1046. }
  1047. if (CHANNEL_DIRECTION(ch->flags) == READ) {
  1048. skb_queue_purge(&ch->io_queue);
  1049. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1050. DEV_EVENT_RXDOWN, dev);
  1051. } else {
  1052. ctc_purge_skb_queue(&ch->io_queue);
  1053. spin_lock(&ch->collect_lock);
  1054. ctc_purge_skb_queue(&ch->collect_queue);
  1055. ch->collect_len = 0;
  1056. spin_unlock(&ch->collect_lock);
  1057. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1058. DEV_EVENT_TXDOWN, dev);
  1059. }
  1060. }
  1061. /**
  1062. * A stop command from the device statemachine arrived while we are in
  1063. * not-operational mode. Set the state to stopped.
  1064. *
  1065. * @param fi An instance of a channel statemachine.
  1066. * @param event The event, just happened.
  1067. * @param arg Generic pointer, casted from channel * upon call.
  1068. */
  1069. static void
  1070. ch_action_stop(fsm_instance * fi, int event, void *arg)
  1071. {
  1072. fsm_newstate(fi, CH_STATE_STOPPED);
  1073. }
  1074. /**
  1075. * A machine check for no path, not-operational status, or a gone device
  1076. * has happened.
  1077. * Clean up the queue and notify the interface statemachine.
  1078. *
  1079. * @param fi An instance of a channel statemachine.
  1080. * @param event The event, just happened.
  1081. * @param arg Generic pointer, casted from channel * upon call.
  1082. */
  1083. static void
  1084. ch_action_fail(fsm_instance * fi, int event, void *arg)
  1085. {
  1086. struct channel *ch = (struct channel *) arg;
  1087. struct net_device *dev = ch->netdev;
  1088. DBF_TEXT(trace, 3, __FUNCTION__);
  1089. fsm_deltimer(&ch->timer);
  1090. fsm_newstate(fi, CH_STATE_NOTOP);
  1091. if (CHANNEL_DIRECTION(ch->flags) == READ) {
  1092. skb_queue_purge(&ch->io_queue);
  1093. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1094. DEV_EVENT_RXDOWN, dev);
  1095. } else {
  1096. ctc_purge_skb_queue(&ch->io_queue);
  1097. spin_lock(&ch->collect_lock);
  1098. ctc_purge_skb_queue(&ch->collect_queue);
  1099. ch->collect_len = 0;
  1100. spin_unlock(&ch->collect_lock);
  1101. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1102. DEV_EVENT_TXDOWN, dev);
  1103. }
  1104. }
  1105. /**
  1106. * Handle error during setup of channel.
  1107. *
  1108. * @param fi An instance of a channel statemachine.
  1109. * @param event The event, just happened.
  1110. * @param arg Generic pointer, casted from channel * upon call.
  1111. */
  1112. static void
  1113. ch_action_setuperr(fsm_instance * fi, int event, void *arg)
  1114. {
  1115. struct channel *ch = (struct channel *) arg;
  1116. struct net_device *dev = ch->netdev;
  1117. DBF_TEXT(setup, 3, __FUNCTION__);
  1118. /**
  1119. * Special case: Got UC_RCRESET on setmode.
  1120. * This means that the remote side isn't set up yet. In this case
  1121. * simply retry after roughly 10 secs...
  1122. */
  1123. if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
  1124. ((event == CH_EVENT_UC_RCRESET) ||
  1125. (event == CH_EVENT_UC_RSRESET))) {
  1126. fsm_newstate(fi, CH_STATE_STARTRETRY);
  1127. fsm_deltimer(&ch->timer);
  1128. fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
  1129. if (CHANNEL_DIRECTION(ch->flags) == READ) {
  1130. int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
  1131. if (rc != 0)
  1132. ccw_check_return_code(
  1133. ch, rc, "HaltIO in ch_action_setuperr");
  1134. }
  1135. return;
  1136. }
  1137. ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
  1138. dev->name, ch_event_names[event],
  1139. (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
  1140. fsm_getstate_str(fi));
  1141. if (CHANNEL_DIRECTION(ch->flags) == READ) {
  1142. fsm_newstate(fi, CH_STATE_RXERR);
  1143. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1144. DEV_EVENT_RXDOWN, dev);
  1145. } else {
  1146. fsm_newstate(fi, CH_STATE_TXERR);
  1147. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1148. DEV_EVENT_TXDOWN, dev);
  1149. }
  1150. }
  1151. /**
  1152. * Restart a channel after an error.
  1153. *
  1154. * @param fi An instance of a channel statemachine.
  1155. * @param event The event, just happened.
  1156. * @param arg Generic pointer, casted from channel * upon call.
  1157. */
  1158. static void
  1159. ch_action_restart(fsm_instance * fi, int event, void *arg)
  1160. {
  1161. unsigned long saveflags;
  1162. int oldstate;
  1163. int rc;
  1164. struct channel *ch = (struct channel *) arg;
  1165. struct net_device *dev = ch->netdev;
  1166. DBF_TEXT(trace, 3, __FUNCTION__);
  1167. fsm_deltimer(&ch->timer);
  1168. ctc_pr_debug("%s: %s channel restart\n", dev->name,
  1169. (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
  1170. fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
  1171. oldstate = fsm_getstate(fi);
  1172. fsm_newstate(fi, CH_STATE_STARTWAIT);
  1173. saveflags = 0; /* avoids compiler warning with
  1174. spin_unlock_irqrestore */
  1175. if (event == CH_EVENT_TIMER) // only for timer not yet locked
  1176. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  1177. rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
  1178. if (event == CH_EVENT_TIMER)
  1179. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  1180. if (rc != 0) {
  1181. if (rc != -EBUSY) {
  1182. fsm_deltimer(&ch->timer);
  1183. fsm_newstate(fi, oldstate);
  1184. }
  1185. ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
  1186. }
  1187. }
  1188. /**
  1189. * Handle error during RX initial handshake (exchange of
  1190. * 0-length block header)
  1191. *
  1192. * @param fi An instance of a channel statemachine.
  1193. * @param event The event, just happened.
  1194. * @param arg Generic pointer, casted from channel * upon call.
  1195. */
  1196. static void
  1197. ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
  1198. {
  1199. struct channel *ch = (struct channel *) arg;
  1200. struct net_device *dev = ch->netdev;
  1201. DBF_TEXT(setup, 3, __FUNCTION__);
  1202. if (event == CH_EVENT_TIMER) {
  1203. fsm_deltimer(&ch->timer);
  1204. ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
  1205. if (ch->retry++ < 3)
  1206. ch_action_restart(fi, event, arg);
  1207. else {
  1208. fsm_newstate(fi, CH_STATE_RXERR);
  1209. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1210. DEV_EVENT_RXDOWN, dev);
  1211. }
  1212. } else
  1213. ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
  1214. }
  1215. /**
  1216. * Notify device statemachine if we gave up initialization
  1217. * of RX channel.
  1218. *
  1219. * @param fi An instance of a channel statemachine.
  1220. * @param event The event, just happened.
  1221. * @param arg Generic pointer, casted from channel * upon call.
  1222. */
  1223. static void
  1224. ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
  1225. {
  1226. struct channel *ch = (struct channel *) arg;
  1227. struct net_device *dev = ch->netdev;
  1228. DBF_TEXT(setup, 3, __FUNCTION__);
  1229. fsm_newstate(fi, CH_STATE_RXERR);
  1230. ctc_pr_warn("%s: RX initialization failed\n", dev->name);
  1231. ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
  1232. fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
  1233. }
  1234. /**
  1235. * Handle RX Unit check remote reset (remote disconnected)
  1236. *
  1237. * @param fi An instance of a channel statemachine.
  1238. * @param event The event, just happened.
  1239. * @param arg Generic pointer, casted from channel * upon call.
  1240. */
  1241. static void
  1242. ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
  1243. {
  1244. struct channel *ch = (struct channel *) arg;
  1245. struct channel *ch2;
  1246. struct net_device *dev = ch->netdev;
  1247. DBF_TEXT(trace, 3, __FUNCTION__);
  1248. fsm_deltimer(&ch->timer);
  1249. ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
  1250. dev->name);
  1251. /**
  1252. * Notify device statemachine
  1253. */
  1254. fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
  1255. fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
  1256. fsm_newstate(fi, CH_STATE_DTERM);
  1257. ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
  1258. fsm_newstate(ch2->fsm, CH_STATE_DTERM);
  1259. ccw_device_halt(ch->cdev, (unsigned long) ch);
  1260. ccw_device_halt(ch2->cdev, (unsigned long) ch2);
  1261. }
  1262. /**
  1263. * Handle error during TX channel initialization.
  1264. *
  1265. * @param fi An instance of a channel statemachine.
  1266. * @param event The event, just happened.
  1267. * @param arg Generic pointer, casted from channel * upon call.
  1268. */
  1269. static void
  1270. ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
  1271. {
  1272. struct channel *ch = (struct channel *) arg;
  1273. struct net_device *dev = ch->netdev;
  1274. DBF_TEXT(setup, 2, __FUNCTION__);
  1275. if (event == CH_EVENT_TIMER) {
  1276. fsm_deltimer(&ch->timer);
  1277. ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
  1278. if (ch->retry++ < 3)
  1279. ch_action_restart(fi, event, arg);
  1280. else {
  1281. fsm_newstate(fi, CH_STATE_TXERR);
  1282. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1283. DEV_EVENT_TXDOWN, dev);
  1284. }
  1285. } else
  1286. ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
  1287. }
  1288. /**
  1289. * Handle TX timeout by retrying operation.
  1290. *
  1291. * @param fi An instance of a channel statemachine.
  1292. * @param event The event, just happened.
  1293. * @param arg Generic pointer, casted from channel * upon call.
  1294. */
  1295. static void
  1296. ch_action_txretry(fsm_instance * fi, int event, void *arg)
  1297. {
  1298. struct channel *ch = (struct channel *) arg;
  1299. struct net_device *dev = ch->netdev;
  1300. unsigned long saveflags;
  1301. DBF_TEXT(trace, 4, __FUNCTION__);
  1302. fsm_deltimer(&ch->timer);
  1303. if (ch->retry++ > 3) {
  1304. ctc_pr_debug("%s: TX retry failed, restarting channel\n",
  1305. dev->name);
  1306. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1307. DEV_EVENT_TXDOWN, dev);
  1308. ch_action_restart(fi, event, arg);
  1309. } else {
  1310. struct sk_buff *skb;
  1311. ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
  1312. if ((skb = skb_peek(&ch->io_queue))) {
  1313. int rc = 0;
  1314. clear_normalized_cda(&ch->ccw[4]);
  1315. ch->ccw[4].count = skb->len;
  1316. if (set_normalized_cda(&ch->ccw[4], skb->data)) {
  1317. ctc_pr_debug(
  1318. "%s: IDAL alloc failed, chan restart\n",
  1319. dev->name);
  1320. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1321. DEV_EVENT_TXDOWN, dev);
  1322. ch_action_restart(fi, event, arg);
  1323. return;
  1324. }
  1325. fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
  1326. saveflags = 0; /* avoids compiler warning with
  1327. spin_unlock_irqrestore */
  1328. if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
  1329. spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
  1330. saveflags);
  1331. rc = ccw_device_start(ch->cdev, &ch->ccw[3],
  1332. (unsigned long) ch, 0xff, 0);
  1333. if (event == CH_EVENT_TIMER)
  1334. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
  1335. saveflags);
  1336. if (rc != 0) {
  1337. fsm_deltimer(&ch->timer);
  1338. ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
  1339. ctc_purge_skb_queue(&ch->io_queue);
  1340. }
  1341. }
  1342. }
  1343. }
  1344. /**
  1345. * Handle fatal errors during an I/O command.
  1346. *
  1347. * @param fi An instance of a channel statemachine.
1348. * @param event The event that just occurred.
1349. * @param arg Generic pointer, cast from struct channel * upon call.
  1350. */
  1351. static void
  1352. ch_action_iofatal(fsm_instance * fi, int event, void *arg)
  1353. {
  1354. struct channel *ch = (struct channel *) arg;
  1355. struct net_device *dev = ch->netdev;
  1356. DBF_TEXT(trace, 3, __FUNCTION__);
  1357. fsm_deltimer(&ch->timer);
  1358. if (CHANNEL_DIRECTION(ch->flags) == READ) {
  1359. ctc_pr_debug("%s: RX I/O error\n", dev->name);
  1360. fsm_newstate(fi, CH_STATE_RXERR);
  1361. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1362. DEV_EVENT_RXDOWN, dev);
  1363. } else {
  1364. ctc_pr_debug("%s: TX I/O error\n", dev->name);
  1365. fsm_newstate(fi, CH_STATE_TXERR);
  1366. fsm_event(((struct ctc_priv *) dev->priv)->fsm,
  1367. DEV_EVENT_TXDOWN, dev);
  1368. }
  1369. }
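/**
* Handle a fatal I/O error by invoking ch_action_iofatal() and
* scheduling a restart of the interface via the device's restart_timer.
*
* @param fi An instance of a channel statemachine.
* @param event The event that just occurred.
* @param arg Generic pointer, cast from struct channel * upon call.
*/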
  1370. static void
  1371. ch_action_reinit(fsm_instance *fi, int event, void *arg)
  1372. {
  1373. struct channel *ch = (struct channel *)arg;
  1374. struct net_device *dev = ch->netdev;
  1375. struct ctc_priv *privptr = dev->priv;
  1376. DBF_TEXT(trace, 4, __FUNCTION__);
  1377. ch_action_iofatal(fi, event, arg);
  1378. fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
  1379. }
  1380. /**
  1381. * The statemachine for a channel.
  1382. */
  1383. static const fsm_node ch_fsm[] = {
  1384. {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
  1385. {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
  1386. {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
  1387. {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
  1388. {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
  1389. {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
  1390. {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
  1391. {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
  1392. {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
  1393. {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
  1394. {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
  1395. {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
  1396. {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
  1397. {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
  1398. {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
  1399. {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
  1400. {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
  1401. {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
  1402. {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
  1403. {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
  1404. {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
  1405. {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
  1406. {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
  1407. {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
  1408. {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
  1409. {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
  1410. {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
  1411. {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
  1412. {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
  1413. {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
  1414. {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
  1415. {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
  1416. {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
  1417. {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
  1418. {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
  1419. {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
  1420. {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
  1421. {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
  1422. {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
  1423. {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
  1424. {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
  1425. {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
  1426. {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
  1427. {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
  1428. // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
  1429. {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
  1430. {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
  1431. {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
  1432. {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
  1433. {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
  1434. {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
  1435. {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
  1436. {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
  1437. {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
  1438. {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
  1439. {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
  1440. {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
  1441. {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
  1442. {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
  1443. {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
  1444. {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
  1445. {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
  1446. {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
  1447. {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
  1448. {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
  1449. {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
  1450. {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
  1451. {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
  1452. {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
  1453. {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
  1454. {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
  1455. {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
  1456. {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
  1457. {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
  1458. {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
  1459. {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
  1460. {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
  1461. {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
  1462. {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
  1463. {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
  1464. {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
  1465. {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
  1466. {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
  1467. {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
  1468. {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
  1469. {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
  1470. {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
  1471. {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
  1472. {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
  1473. {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
  1474. {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
  1475. };
  1476. static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
  1477. /**
  1478. * Functions related to setup and device detection.
  1479. *****************************************************************************/
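/**
* Compare the device number portion of two channel IDs.
* The first five characters of each ID are skipped and the remainder
* is interpreted as a hexadecimal device number.
*
* @param id1 First channel ID.
* @param id2 Second channel ID.
*
* @return 1 if the device number of id1 is smaller than that of id2,
* 0 otherwise.
*/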
  1480. static inline int
  1481. less_than(char *id1, char *id2)
  1482. {
  1483. int dev1, dev2, i;
  1484. for (i = 0; i < 5; i++) {
  1485. id1++;
  1486. id2++;
  1487. }
  1488. dev1 = simple_strtoul(id1, &id1, 16);
  1489. dev2 = simple_strtoul(id2, &id2, 16);
  1490. return (dev1 < dev2);
  1491. }
  1492. /**
  1493. * Add a new channel to the list of channels.
  1494. * Keeps the channel list sorted.
  1495. *
  1496. * @param cdev The ccw_device to be added.
  1497. * @param type The type class of the new channel.
  1498. *
  1499. * @return 0 on success, !0 on error.
  1500. */
  1501. static int
  1502. add_channel(struct ccw_device *cdev, enum channel_types type)
  1503. {
  1504. struct channel **c = &channels;
  1505. struct channel *ch;
  1506. DBF_TEXT(trace, 2, __FUNCTION__);
  1507. if ((ch =
  1508. (struct channel *) kmalloc(sizeof (struct channel),
  1509. GFP_KERNEL)) == NULL) {
  1510. ctc_pr_warn("ctc: Out of memory in add_channel\n");
  1511. return -1;
  1512. }
  1513. memset(ch, 0, sizeof (struct channel));
  1514. if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
  1515. GFP_KERNEL | GFP_DMA)) == NULL) {
  1516. kfree(ch);
  1517. ctc_pr_warn("ctc: Out of memory in add_channel\n");
  1518. return -1;
  1519. }
1520. memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // ensure all flags and counters are reset
  1521. /**
  1522. * "static" ccws are used in the following way:
  1523. *
  1524. * ccw[0..2] (Channel program for generic I/O):
  1525. * 0: prepare
  1526. * 1: read or write (depending on direction) with fixed
  1527. * buffer (idal allocated once when buffer is allocated)
  1528. * 2: nop
  1529. * ccw[3..5] (Channel program for direct write of packets)
  1530. * 3: prepare
  1531. * 4: write (idal allocated on every write).
  1532. * 5: nop
  1533. * ccw[6..7] (Channel program for initial channel setup):
  1534. * 6: set extended mode
  1535. * 7: nop
  1536. *
  1537. * ch->ccw[0..5] are initialized in ch_action_start because
  1538. * the channel's direction is yet unknown here.
  1539. */
  1540. ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
  1541. ch->ccw[6].flags = CCW_FLAG_SLI;
  1542. ch->ccw[7].cmd_code = CCW_CMD_NOOP;
  1543. ch->ccw[7].flags = CCW_FLAG_SLI;
  1544. ch->cdev = cdev;
  1545. snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
  1546. ch->type = type;
  1547. ch->fsm = init_fsm(ch->id, ch_state_names,
  1548. ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
  1549. ch_fsm, CH_FSM_LEN, GFP_KERNEL);
  1550. if (ch->fsm == NULL) {
  1551. ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
  1552. kfree(ch->ccw);
  1553. kfree(ch);
  1554. return -1;
  1555. }
  1556. fsm_newstate(ch->fsm, CH_STATE_IDLE);
  1557. if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
  1558. GFP_KERNEL)) == NULL) {
  1559. ctc_pr_warn("ctc: Out of memory in add_channel\n");
  1560. kfree_fsm(ch->fsm);
  1561. kfree(ch->ccw);
  1562. kfree(ch);
  1563. return -1;
  1564. }
  1565. memset(ch->irb, 0, sizeof (struct irb));
  1566. while (*c && less_than((*c)->id, ch->id))
  1567. c = &(*c)->next;
  1568. if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
  1569. ctc_pr_debug(
  1570. "ctc: add_channel: device %s already in list, "
  1571. "using old entry\n", (*c)->id);
  1572. kfree(ch->irb);
  1573. kfree_fsm(ch->fsm);
  1574. kfree(ch->ccw);
  1575. kfree(ch);
  1576. return 0;
  1577. }
  1578. spin_lock_init(&ch->collect_lock);
  1579. fsm_settimer(ch->fsm, &ch->timer);
  1580. skb_queue_head_init(&ch->io_queue);
  1581. skb_queue_head_init(&ch->collect_queue);
  1582. ch->next = *c;
  1583. *c = ch;
  1584. return 0;
  1585. }
  1586. /**
  1587. * Release a specific channel in the channel list.
  1588. *
  1589. * @param ch Pointer to channel struct to be released.
  1590. */
  1591. static void
  1592. channel_free(struct channel *ch)
  1593. {
  1594. ch->flags &= ~CHANNEL_FLAGS_INUSE;
  1595. fsm_newstate(ch->fsm, CH_STATE_IDLE);
  1596. }
  1597. /**
1598. * Remove a specific channel from the channel list and free its resources.
1599. *
1600. * @param ch Pointer to channel struct to be removed.
  1601. */
  1602. static void
  1603. channel_remove(struct channel *ch)
  1604. {
  1605. struct channel **c = &channels;
  1606. DBF_TEXT(trace, 2, __FUNCTION__);
  1607. if (ch == NULL)
  1608. return;
  1609. channel_free(ch);
  1610. while (*c) {
  1611. if (*c == ch) {
  1612. *c = ch->next;
  1613. fsm_deltimer(&ch->timer);
  1614. kfree_fsm(ch->fsm);
  1615. clear_normalized_cda(&ch->ccw[4]);
  1616. if (ch->trans_skb != NULL) {
  1617. clear_normalized_cda(&ch->ccw[1]);
  1618. dev_kfree_skb(ch->trans_skb);
  1619. }
  1620. kfree(ch->ccw);
  1621. kfree(ch->irb);
  1622. kfree(ch);
  1623. return;
  1624. }
  1625. c = &((*c)->next);
  1626. }
  1627. }
  1628. /**
  1629. * Get a specific channel from the channel list.
  1630. *
  1631. * @param type Type of channel we are interested in.
  1632. * @param id Id of channel we are interested in.
  1633. * @param direction Direction we want to use this channel for.
  1634. *
  1635. * @return Pointer to a channel or NULL if no matching channel available.
  1636. */
  1637. static struct channel
  1638. *
  1639. channel_get(enum channel_types type, char *id, int direction)
  1640. {
  1641. struct channel *ch = channels;
  1642. DBF_TEXT(trace, 3, __FUNCTION__);
  1643. #ifdef DEBUG
  1644. ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
  1645. __func__, id, type);
  1646. #endif
  1647. while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
  1648. #ifdef DEBUG
1649. ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
  1650. __func__, ch, ch->id, ch->type);
  1651. #endif
  1652. ch = ch->next;
  1653. }
  1654. #ifdef DEBUG
1655. ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
  1656. __func__, ch, ch->id, ch->type);
  1657. #endif
  1658. if (!ch) {
  1659. ctc_pr_warn("ctc: %s(): channel with id %s "
  1660. "and type %d not found in channel list\n",
  1661. __func__, id, type);
  1662. } else {
  1663. if (ch->flags & CHANNEL_FLAGS_INUSE)
  1664. ch = NULL;
  1665. else {
  1666. ch->flags |= CHANNEL_FLAGS_INUSE;
  1667. ch->flags &= ~CHANNEL_FLAGS_RWMASK;
  1668. ch->flags |= (direction == WRITE)
  1669. ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
  1670. fsm_newstate(ch->fsm, CH_STATE_STOPPED);
  1671. }
  1672. }
  1673. return ch;
  1674. }
  1675. /**
  1676. * Return the channel type by name.
  1677. *
  1678. * @param name Name of network interface.
  1679. *
  1680. * @return Type class of channel to be used for that interface.
  1681. */
1682. static inline enum channel_types
  1683. extract_channel_media(char *name)
  1684. {
  1685. enum channel_types ret = channel_type_unknown;
  1686. if (name != NULL) {
  1687. if (strncmp(name, "ctc", 3) == 0)
  1688. ret = channel_type_parallel;
  1689. if (strncmp(name, "escon", 5) == 0)
  1690. ret = channel_type_escon;
  1691. }
  1692. return ret;
  1693. }
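/**
* Check the IRB passed to the interrupt handler. The common I/O layer
* may hand over an ERR_PTR() encoded error instead of a valid IRB.
*
* @param cdev The ccw_device the interrupt is for.
* @param irb Interruption response block (possibly an error pointer).
*
* @return 0 if the IRB is valid, the encoded error value otherwise.
*/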
  1694. static long
  1695. __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
  1696. {
  1697. if (!IS_ERR(irb))
  1698. return 0;
  1699. switch (PTR_ERR(irb)) {
  1700. case -EIO:
  1701. ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
  1702. // CTC_DBF_TEXT(trace, 2, "ckirberr");
  1703. // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
  1704. break;
  1705. case -ETIMEDOUT:
  1706. ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
  1707. // CTC_DBF_TEXT(trace, 2, "ckirberr");
  1708. // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
  1709. break;
  1710. default:
  1711. ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
  1712. cdev->dev.bus_id);
  1713. // CTC_DBF_TEXT(trace, 2, "ckirberr");
  1714. // CTC_DBF_TEXT(trace, 2, " rc???");
  1715. }
  1716. return PTR_ERR(irb);
  1717. }
  1718. /**
  1719. * Main IRQ handler.
  1720. *
  1721. * @param cdev The ccw_device the interrupt is for.
  1722. * @param intparm interruption parameter.
  1723. * @param irb interruption response block.
  1724. */
  1725. static void
  1726. ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
  1727. {
  1728. struct channel *ch;
  1729. struct net_device *dev;
  1730. struct ctc_priv *priv;
  1731. DBF_TEXT(trace, 5, __FUNCTION__);
  1732. if (__ctc_check_irb_error(cdev, irb))
  1733. return;
  1734. /* Check for unsolicited interrupts. */
  1735. if (!cdev->dev.driver_data) {
  1736. ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
  1737. cdev->dev.bus_id, irb->scsw.cstat,
  1738. irb->scsw.dstat);
  1739. return;
  1740. }
  1741. priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
  1742. ->dev.driver_data;
  1743. /* Try to extract channel from driver data. */
  1744. if (priv->channel[READ]->cdev == cdev)
  1745. ch = priv->channel[READ];
  1746. else if (priv->channel[WRITE]->cdev == cdev)
  1747. ch = priv->channel[WRITE];
  1748. else {
  1749. ctc_pr_err("ctc: Can't determine channel for interrupt, "
  1750. "device %s\n", cdev->dev.bus_id);
  1751. return;
  1752. }
  1753. dev = (struct net_device *) (ch->netdev);
  1754. if (dev == NULL) {
  1755. ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
  1756. cdev->dev.bus_id, ch);
  1757. return;
  1758. }
  1759. #ifdef DEBUG
  1760. ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
  1761. dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
  1762. #endif
  1763. /* Copy interruption response block. */
  1764. memcpy(ch->irb, irb, sizeof(struct irb));
  1765. /* Check for good subchannel return code, otherwise error message */
  1766. if (ch->irb->scsw.cstat) {
  1767. fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
  1768. ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
  1769. dev->name, ch->id, ch->irb->scsw.cstat,
  1770. ch->irb->scsw.dstat);
  1771. return;
  1772. }
  1773. /* Check the reason-code of a unit check */
  1774. if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
  1775. ccw_unit_check(ch, ch->irb->ecw[0]);
  1776. return;
  1777. }
  1778. if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
  1779. if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
  1780. fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
  1781. else
  1782. fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
  1783. return;
  1784. }
  1785. if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
  1786. fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
  1787. return;
  1788. }
  1789. if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
  1790. (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
  1791. (ch->irb->scsw.stctl ==
  1792. (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
  1793. fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
  1794. else
  1795. fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
  1796. }
  1797. /**
  1798. * Actions for interface - statemachine.
  1799. *****************************************************************************/
  1800. /**
1801. * Start up the channels by sending CH_EVENT_START to each of them.
  1802. *
  1803. * @param fi An instance of an interface statemachine.
1804. * @param event The event that just occurred.
1805. * @param arg Generic pointer, cast from struct net_device * upon call.
  1806. */
  1807. static void
  1808. dev_action_start(fsm_instance * fi, int event, void *arg)
  1809. {
  1810. struct net_device *dev = (struct net_device *) arg;
  1811. struct ctc_priv *privptr = dev->priv;
  1812. int direction;
  1813. DBF_TEXT(setup, 3, __FUNCTION__);
  1814. fsm_deltimer(&privptr->restart_timer);
  1815. fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
  1816. for (direction = READ; direction <= WRITE; direction++) {
  1817. struct channel *ch = privptr->channel[direction];
  1818. fsm_event(ch->fsm, CH_EVENT_START, ch);
  1819. }
  1820. }
  1821. /**
1822. * Shut down the channels by sending CH_EVENT_STOP to each of them.
  1823. *
  1824. * @param fi An instance of an interface statemachine.
1825. * @param event The event that just occurred.
1826. * @param arg Generic pointer, cast from struct net_device * upon call.
  1827. */
  1828. static void
  1829. dev_action_stop(fsm_instance * fi, int event, void *arg)
  1830. {
  1831. struct net_device *dev = (struct net_device *) arg;
  1832. struct ctc_priv *privptr = dev->priv;
  1833. int direction;
  1834. DBF_TEXT(trace, 3, __FUNCTION__);
  1835. fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
  1836. for (direction = READ; direction <= WRITE; direction++) {
  1837. struct channel *ch = privptr->channel[direction];
  1838. fsm_event(ch->fsm, CH_EVENT_STOP, ch);
  1839. }
  1840. }
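/**
* Restart an interface: shut down both channels, notify the device
* statemachine and schedule a DEV_EVENT_START after CTC_TIMEOUT_5SEC.
*
* @param fi An instance of an interface statemachine.
* @param event The event that just occurred.
* @param arg Generic pointer, cast from struct net_device * upon call.
*/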
  1841. static void
  1842. dev_action_restart(fsm_instance *fi, int event, void *arg)
  1843. {
  1844. struct net_device *dev = (struct net_device *)arg;
  1845. struct ctc_priv *privptr = dev->priv;
  1846. DBF_TEXT(trace, 3, __FUNCTION__);
  1847. ctc_pr_debug("%s: Restarting\n", dev->name);
  1848. dev_action_stop(fi, event, arg);
  1849. fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
  1850. fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
  1851. DEV_EVENT_START, dev);
  1852. }
  1853. /**
  1854. * Called from channel statemachine
  1855. * when a channel is up and running.
  1856. *
  1857. * @param fi An instance of an interface statemachine.
1858. * @param event The event that just occurred.
1859. * @param arg Generic pointer, cast from struct net_device * upon call.
  1860. */
  1861. static void
  1862. dev_action_chup(fsm_instance * fi, int event, void *arg)
  1863. {
  1864. struct net_device *dev = (struct net_device *) arg;
  1865. DBF_TEXT(trace, 3, __FUNCTION__);
  1866. switch (fsm_getstate(fi)) {
  1867. case DEV_STATE_STARTWAIT_RXTX:
  1868. if (event == DEV_EVENT_RXUP)
  1869. fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
  1870. else
  1871. fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
  1872. break;
  1873. case DEV_STATE_STARTWAIT_RX:
  1874. if (event == DEV_EVENT_RXUP) {
  1875. fsm_newstate(fi, DEV_STATE_RUNNING);
  1876. ctc_pr_info("%s: connected with remote side\n",
  1877. dev->name);
  1878. ctc_clear_busy(dev);
  1879. }
  1880. break;
  1881. case DEV_STATE_STARTWAIT_TX:
  1882. if (event == DEV_EVENT_TXUP) {
  1883. fsm_newstate(fi, DEV_STATE_RUNNING);
  1884. ctc_pr_info("%s: connected with remote side\n",
  1885. dev->name);
  1886. ctc_clear_busy(dev);
  1887. }
  1888. break;
  1889. case DEV_STATE_STOPWAIT_TX:
  1890. if (event == DEV_EVENT_RXUP)
  1891. fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
  1892. break;
  1893. case DEV_STATE_STOPWAIT_RX:
  1894. if (event == DEV_EVENT_TXUP)
  1895. fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
  1896. break;
  1897. }
  1898. }
  1899. /**
  1900. * Called from channel statemachine
  1901. * when a channel has been shutdown.
  1902. *
  1903. * @param fi An instance of an interface statemachine.
1904. * @param event The event that just occurred.
1905. * @param arg Generic pointer, cast from struct net_device * upon call.
  1906. */
  1907. static void
  1908. dev_action_chdown(fsm_instance * fi, int event, void *arg)
  1909. {
  1910. DBF_TEXT(trace, 3, __FUNCTION__);
  1911. switch (fsm_getstate(fi)) {
  1912. case DEV_STATE_RUNNING:
  1913. if (event == DEV_EVENT_TXDOWN)
  1914. fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
  1915. else
  1916. fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
  1917. break;
  1918. case DEV_STATE_STARTWAIT_RX:
  1919. if (event == DEV_EVENT_TXDOWN)
  1920. fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
  1921. break;
  1922. case DEV_STATE_STARTWAIT_TX:
  1923. if (event == DEV_EVENT_RXDOWN)
  1924. fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
  1925. break;
  1926. case DEV_STATE_STOPWAIT_RXTX:
  1927. if (event == DEV_EVENT_TXDOWN)
  1928. fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
  1929. else
  1930. fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
  1931. break;
  1932. case DEV_STATE_STOPWAIT_RX:
  1933. if (event == DEV_EVENT_RXDOWN)
  1934. fsm_newstate(fi, DEV_STATE_STOPPED);
  1935. break;
  1936. case DEV_STATE_STOPWAIT_TX:
  1937. if (event == DEV_EVENT_TXDOWN)
  1938. fsm_newstate(fi, DEV_STATE_STOPPED);
  1939. break;
  1940. }
  1941. }
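/**
* The statemachine for an interface.
*/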
  1942. static const fsm_node dev_fsm[] = {
  1943. {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
  1944. {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
  1945. {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
  1946. {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
  1947. {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
  1948. {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
  1949. {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
  1950. {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
  1951. {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
  1952. {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
  1953. {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
  1954. {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
  1955. {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
  1956. {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
  1957. {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
  1958. {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
  1959. {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
  1960. {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
  1961. {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
  1962. {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
  1963. {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
  1964. {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
  1965. {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
  1966. {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
  1967. {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
  1968. {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
  1969. {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
  1970. {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
  1971. {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
  1972. {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
  1973. {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
  1974. {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
  1975. {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
  1976. {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
  1977. {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
  1978. {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
  1979. {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
  1980. };
  1981. static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
  1982. /**
  1983. * Transmit a packet.
  1984. * This is a helper function for ctc_tx().
  1985. *
  1986. * @param ch Channel to be used for sending.
  1987. * @param skb Pointer to struct sk_buff of packet to send.
  1988. * The linklevel header has already been set up
  1989. * by ctc_tx().
  1990. *
1991. * @return 0 on success, -ERRNO (e.g. -EBUSY or -ENOMEM) on failure.
  1992. */
  1993. static int
  1994. transmit_skb(struct channel *ch, struct sk_buff *skb)
  1995. {
  1996. unsigned long saveflags;
  1997. struct ll_header header;
  1998. int rc = 0;
  1999. DBF_TEXT(trace, 5, __FUNCTION__);
  2000. /* we need to acquire the lock for testing the state
  2001. * otherwise we can have an IRQ changing the state to
  2002. * TXIDLE after the test but before acquiring the lock.
  2003. */
  2004. spin_lock_irqsave(&ch->collect_lock, saveflags);
  2005. if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
  2006. int l = skb->len + LL_HEADER_LENGTH;
  2007. if (ch->collect_len + l > ch->max_bufsize - 2) {
  2008. spin_unlock_irqrestore(&ch->collect_lock, saveflags);
  2009. return -EBUSY;
  2010. } else {
  2011. atomic_inc(&skb->users);
  2012. header.length = l;
  2013. header.type = skb->protocol;
  2014. header.unused = 0;
  2015. memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
  2016. LL_HEADER_LENGTH);
  2017. skb_queue_tail(&ch->collect_queue, skb);
  2018. ch->collect_len += l;
  2019. }
  2020. spin_unlock_irqrestore(&ch->collect_lock, saveflags);
  2021. } else {
  2022. __u16 block_len;
  2023. int ccw_idx;
  2024. struct sk_buff *nskb;
  2025. unsigned long hi;
  2026. spin_unlock_irqrestore(&ch->collect_lock, saveflags);
  2027. /**
2028. * Protect skb against being freed by upper
  2029. * layers.
  2030. */
  2031. atomic_inc(&skb->users);
  2032. ch->prof.txlen += skb->len;
  2033. header.length = skb->len + LL_HEADER_LENGTH;
  2034. header.type = skb->protocol;
  2035. header.unused = 0;
  2036. memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
  2037. LL_HEADER_LENGTH);
  2038. block_len = skb->len + 2;
  2039. *((__u16 *) skb_push(skb, 2)) = block_len;
  2040. /**
  2041. * IDAL support in CTC is broken, so we have to
2042. * take care of skbs above 2G ourselves.
  2043. */
  2044. hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
  2045. if (hi) {
  2046. nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
  2047. if (!nskb) {
  2048. atomic_dec(&skb->users);
  2049. skb_pull(skb, LL_HEADER_LENGTH + 2);
  2050. ctc_clear_busy(ch->netdev);
  2051. return -ENOMEM;
  2052. } else {
  2053. memcpy(skb_put(nskb, skb->len),
  2054. skb->data, skb->len);
  2055. atomic_inc(&nskb->users);
  2056. atomic_dec(&skb->users);
  2057. dev_kfree_skb_irq(skb);
  2058. skb = nskb;
  2059. }
  2060. }
  2061. ch->ccw[4].count = block_len;
  2062. if (set_normalized_cda(&ch->ccw[4], skb->data)) {
  2063. /**
  2064. * idal allocation failed, try via copying to
  2065. * trans_skb. trans_skb usually has a pre-allocated
  2066. * idal.
  2067. */
  2068. if (ctc_checkalloc_buffer(ch, 1)) {
  2069. /**
  2070. * Remove our header. It gets added
  2071. * again on retransmit.
  2072. */
  2073. atomic_dec(&skb->users);
  2074. skb_pull(skb, LL_HEADER_LENGTH + 2);
  2075. ctc_clear_busy(ch->netdev);
  2076. return -EBUSY;
  2077. }
  2078. ch->trans_skb->tail = ch->trans_skb->data;
  2079. ch->trans_skb->len = 0;
  2080. ch->ccw[1].count = skb->len;
  2081. memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
  2082. skb->len);
  2083. atomic_dec(&skb->users);
  2084. dev_kfree_skb_irq(skb);
  2085. ccw_idx = 0;
  2086. } else {
  2087. skb_queue_tail(&ch->io_queue, skb);
  2088. ccw_idx = 3;
  2089. }
  2090. ch->retry = 0;
  2091. fsm_newstate(ch->fsm, CH_STATE_TX);
  2092. fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
  2093. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  2094. ch->prof.send_stamp = xtime;
  2095. rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
  2096. (unsigned long) ch, 0xff, 0);
  2097. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  2098. if (ccw_idx == 3)
  2099. ch->prof.doios_single++;
  2100. if (rc != 0) {
  2101. fsm_deltimer(&ch->timer);
  2102. ccw_check_return_code(ch, rc, "single skb TX");
  2103. if (ccw_idx == 3)
  2104. skb_dequeue_tail(&ch->io_queue);
  2105. /**
  2106. * Remove our header. It gets added
  2107. * again on retransmit.
  2108. */
  2109. skb_pull(skb, LL_HEADER_LENGTH + 2);
  2110. } else {
  2111. if (ccw_idx == 0) {
  2112. struct net_device *dev = ch->netdev;
  2113. struct ctc_priv *privptr = dev->priv;
  2114. privptr->stats.tx_packets++;
  2115. privptr->stats.tx_bytes +=
  2116. skb->len - LL_HEADER_LENGTH;
  2117. }
  2118. }
  2119. }
  2120. ctc_clear_busy(ch->netdev);
  2121. return rc;
  2122. }
  2123. /**
  2124. * Interface API for upper network layers
  2125. *****************************************************************************/
  2126. /**
  2127. * Open an interface.
  2128. * Called from generic network layer when ifconfig up is run.
  2129. *
  2130. * @param dev Pointer to interface struct.
  2131. *
  2132. * @return 0 on success, -ERRNO on failure. (Never fails.)
  2133. */
  2134. static int
  2135. ctc_open(struct net_device * dev)
  2136. {
  2137. DBF_TEXT(trace, 5, __FUNCTION__);
  2138. fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
  2139. return 0;
  2140. }
  2141. /**
  2142. * Close an interface.
  2143. * Called from generic network layer when ifconfig down is run.
  2144. *
  2145. * @param dev Pointer to interface struct.
  2146. *
  2147. * @return 0 on success, -ERRNO on failure. (Never fails.)
  2148. */
  2149. static int
  2150. ctc_close(struct net_device * dev)
  2151. {
  2152. DBF_TEXT(trace, 5, __FUNCTION__);
  2153. fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
  2154. return 0;
  2155. }
  2156. /**
  2157. * Start transmission of a packet.
  2158. * Called from generic network device layer.
  2159. *
  2160. * @param skb Pointer to buffer containing the packet.
  2161. * @param dev Pointer to interface struct.
  2162. *
  2163. * @return 0 if packet consumed, !0 if packet rejected.
2164. * Note: If we return !0, then the packet is freed by
  2165. * the generic network layer.
  2166. */
  2167. static int
  2168. ctc_tx(struct sk_buff *skb, struct net_device * dev)
  2169. {
  2170. int rc = 0;
  2171. struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
  2172. DBF_TEXT(trace, 5, __FUNCTION__);
  2173. /**
  2174. * Some sanity checks ...
  2175. */
  2176. if (skb == NULL) {
  2177. ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
  2178. privptr->stats.tx_dropped++;
  2179. return 0;
  2180. }
  2181. if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
  2182. ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
  2183. dev->name, LL_HEADER_LENGTH + 2);
  2184. dev_kfree_skb(skb);
  2185. privptr->stats.tx_dropped++;
  2186. return 0;
  2187. }
  2188. /**
  2189. * If channels are not running, try to restart them
2190. * and drop the packet.
  2191. */
  2192. if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
  2193. fsm_event(privptr->fsm, DEV_EVENT_START, dev);
  2194. dev_kfree_skb(skb);
  2195. privptr->stats.tx_dropped++;
  2196. privptr->stats.tx_errors++;
  2197. privptr->stats.tx_carrier_errors++;
  2198. return 0;
  2199. }
  2200. if (ctc_test_and_set_busy(dev))
  2201. return -EBUSY;
  2202. dev->trans_start = jiffies;
  2203. if (transmit_skb(privptr->channel[WRITE], skb) != 0)
  2204. rc = 1;
  2205. return rc;
  2206. }
  2207. /**
  2208. * Sets MTU of an interface.
  2209. *
  2210. * @param dev Pointer to interface struct.
  2211. * @param new_mtu The new MTU to use for this interface.
  2212. *
  2213. * @return 0 on success, -EINVAL if MTU is out of valid range.
  2214. * (valid range is 576 .. 65527). If VM is on the
  2215. * remote side, maximum MTU is 32760, however this is
  2216. * <em>not</em> checked here.
  2217. */
  2218. static int
  2219. ctc_change_mtu(struct net_device * dev, int new_mtu)
  2220. {
  2221. struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
  2222. DBF_TEXT(trace, 3, __FUNCTION__);
  2223. if ((new_mtu < 576) || (new_mtu > 65527) ||
  2224. (new_mtu > (privptr->channel[READ]->max_bufsize -
  2225. LL_HEADER_LENGTH - 2)))
  2226. return -EINVAL;
  2227. dev->mtu = new_mtu;
  2228. dev->hard_header_len = LL_HEADER_LENGTH + 2;
  2229. return 0;
  2230. }
  2231. /**
  2232. * Returns interface statistics of a device.
  2233. *
  2234. * @param dev Pointer to interface struct.
  2235. *
  2236. * @return Pointer to stats struct of this interface.
  2237. */
  2238. static struct net_device_stats *
  2239. ctc_stats(struct net_device * dev)
  2240. {
  2241. return &((struct ctc_priv *) dev->priv)->stats;
  2242. }
  2243. /*
  2244. * sysfs attributes
  2245. */
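/**
* sysfs show handler for the buffer attribute:
* prints the currently configured buffer size.
*/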
  2246. static ssize_t
  2247. buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
  2248. {
  2249. struct ctc_priv *priv;
  2250. priv = dev->driver_data;
  2251. if (!priv)
  2252. return -ENODEV;
  2253. return sprintf(buf, "%d\n",
  2254. priv->buffer_size);
  2255. }
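/**
* sysfs store handler for the buffer attribute: sets a new buffer size
* for both channels. The value must not exceed CTC_BUFSIZE_LIMIT, must
* be at least 576 + LL_HEADER_LENGTH + 2 and, while the interface is
* running, at least MTU + LL_HEADER_LENGTH + 2.
*
* Illustrative usage from the shell (the exact sysfs path of the
* ccwgroup device depends on the system):
*
* echo 32768 > <path of the ccwgroup device>/buffer
*/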
  2256. static ssize_t
  2257. buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  2258. {
  2259. struct ctc_priv *priv;
  2260. struct net_device *ndev;
  2261. int bs1;
  2262. char buffer[16];
  2263. DBF_TEXT(trace, 3, __FUNCTION__);
  2264. DBF_TEXT(trace, 3, buf);
  2265. priv = dev->driver_data;
  2266. if (!priv) {
  2267. DBF_TEXT(trace, 3, "bfnopriv");
  2268. return -ENODEV;
  2269. }
  2270. sscanf(buf, "%u", &bs1);
  2271. if (bs1 > CTC_BUFSIZE_LIMIT)
  2272. goto einval;
  2273. if (bs1 < (576 + LL_HEADER_LENGTH + 2))
  2274. goto einval;
  2275. priv->buffer_size = bs1; // just to overwrite the default
  2276. ndev = priv->channel[READ]->netdev;
  2277. if (!ndev) {
  2278. DBF_TEXT(trace, 3, "bfnondev");
  2279. return -ENODEV;
  2280. }
  2281. if ((ndev->flags & IFF_RUNNING) &&
  2282. (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
  2283. goto einval;
  2284. priv->channel[READ]->max_bufsize = bs1;
  2285. priv->channel[WRITE]->max_bufsize = bs1;
  2286. if (!(ndev->flags & IFF_RUNNING))
  2287. ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
  2288. priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
  2289. priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
  2290. sprintf(buffer, "%d",priv->buffer_size);
  2291. DBF_TEXT(trace, 3, buffer);
  2292. return count;
  2293. einval:
  2294. DBF_TEXT(trace, 3, "buff_err");
  2295. return -EINVAL;
  2296. }
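/**
* sysfs show/store handlers for the global loglevel attribute.
* Valid values are 0 .. CTC_LOGLEVEL_MAX.
*/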
  2297. static ssize_t
  2298. loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
  2299. {
  2300. return sprintf(buf, "%d\n", loglevel);
  2301. }
  2302. static ssize_t
  2303. loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  2304. {
  2305. int ll1;
  2306. DBF_TEXT(trace, 5, __FUNCTION__);
  2307. sscanf(buf, "%i", &ll1);
  2308. if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
  2309. return -EINVAL;
  2310. loglevel = ll1;
  2311. return count;
  2312. }
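/**
* Dump the FSM states and TX statistics of a device to the kernel log.
*
* @param priv Pointer to device private data.
*/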
  2313. static void
  2314. ctc_print_statistics(struct ctc_priv *priv)
  2315. {
  2316. char *sbuf;
  2317. char *p;
  2318. DBF_TEXT(trace, 4, __FUNCTION__);
  2319. if (!priv)
  2320. return;
  2321. sbuf = (char *)kmalloc(2048, GFP_KERNEL);
  2322. if (sbuf == NULL)
  2323. return;
  2324. p = sbuf;
  2325. p += sprintf(p, " Device FSM state: %s\n",
  2326. fsm_getstate_str(priv->fsm));
  2327. p += sprintf(p, " RX channel FSM state: %s\n",
  2328. fsm_getstate_str(priv->channel[READ]->fsm));
  2329. p += sprintf(p, " TX channel FSM state: %s\n",
  2330. fsm_getstate_str(priv->channel[WRITE]->fsm));
  2331. p += sprintf(p, " Max. TX buffer used: %ld\n",
  2332. priv->channel[WRITE]->prof.maxmulti);
  2333. p += sprintf(p, " Max. chained SKBs: %ld\n",
  2334. priv->channel[WRITE]->prof.maxcqueue);
  2335. p += sprintf(p, " TX single write ops: %ld\n",
  2336. priv->channel[WRITE]->prof.doios_single);
  2337. p += sprintf(p, " TX multi write ops: %ld\n",
  2338. priv->channel[WRITE]->prof.doios_multi);
2339. p += sprintf(p, " Net bytes written: %ld\n",
  2340. priv->channel[WRITE]->prof.txlen);
  2341. p += sprintf(p, " Max. TX IO-time: %ld\n",
  2342. priv->channel[WRITE]->prof.tx_time);
  2343. ctc_pr_debug("Statistics for %s:\n%s",
  2344. priv->channel[WRITE]->netdev->name, sbuf);
  2345. kfree(sbuf);
  2346. return;
  2347. }
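/**
* sysfs handlers for the stats attribute: reading dumps the statistics
* to the kernel log, writing any value resets the TX statistics.
*/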
  2348. static ssize_t
  2349. stats_show(struct device *dev, struct device_attribute *attr, char *buf)
  2350. {
  2351. struct ctc_priv *priv = dev->driver_data;
  2352. if (!priv)
  2353. return -ENODEV;
  2354. ctc_print_statistics(priv);
  2355. return sprintf(buf, "0\n");
  2356. }
  2357. static ssize_t
  2358. stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  2359. {
  2360. struct ctc_priv *priv = dev->driver_data;
  2361. if (!priv)
  2362. return -ENODEV;
  2363. /* Reset statistics */
  2364. memset(&priv->channel[WRITE]->prof, 0,
  2365. sizeof(priv->channel[WRITE]->prof));
  2366. return count;
  2367. }
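/**
* Helpers for registering and unregistering the net_device and for
* freeing it together with its private data.
*/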
  2368. static void
  2369. ctc_netdev_unregister(struct net_device * dev)
  2370. {
  2371. struct ctc_priv *privptr;
  2372. if (!dev)
  2373. return;
  2374. privptr = (struct ctc_priv *) dev->priv;
  2375. unregister_netdev(dev);
  2376. }
  2377. static int
  2378. ctc_netdev_register(struct net_device * dev)
  2379. {
  2380. return register_netdev(dev);
  2381. }
  2382. static void
  2383. ctc_free_netdevice(struct net_device * dev, int free_dev)
  2384. {
  2385. struct ctc_priv *privptr;
  2386. if (!dev)
  2387. return;
  2388. privptr = dev->priv;
  2389. if (privptr) {
  2390. if (privptr->fsm)
  2391. kfree_fsm(privptr->fsm);
  2392. kfree(privptr);
  2393. }
  2394. #ifdef MODULE
  2395. if (free_dev)
  2396. free_netdev(dev);
  2397. #endif
  2398. }
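/**
* sysfs show/store handlers for the protocol attribute. The stored
* value must be the numeric value of one of the CTC_PROTO_S390,
* CTC_PROTO_LINUX or CTC_PROTO_OS390 constants.
*/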
  2399. static ssize_t
  2400. ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
  2401. {
  2402. struct ctc_priv *priv;
  2403. priv = dev->driver_data;
  2404. if (!priv)
  2405. return -ENODEV;
  2406. return sprintf(buf, "%d\n", priv->protocol);
  2407. }
  2408. static ssize_t
  2409. ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
  2410. {
  2411. struct ctc_priv *priv;
  2412. int value;
  2413. DBF_TEXT(trace, 3, __FUNCTION__);
  2414. pr_debug("%s() called\n", __FUNCTION__);
  2415. priv = dev->driver_data;
  2416. if (!priv)
  2417. return -ENODEV;
  2418. sscanf(buf, "%u", &value);
  2419. if (!((value == CTC_PROTO_S390) ||
  2420. (value == CTC_PROTO_LINUX) ||
  2421. (value == CTC_PROTO_OS390)))
  2422. return -EINVAL;
  2423. priv->protocol = value;
  2424. return count;
  2425. }
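/**
* sysfs show handler for the read-only type attribute:
* prints the channel type name taken from the cu3088_type table.
*/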
  2426. static ssize_t
  2427. ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
  2428. {
  2429. struct ccwgroup_device *cgdev;
  2430. cgdev = to_ccwgroupdev(dev);
  2431. if (!cgdev)
  2432. return -ENODEV;
  2433. return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
  2434. }
  2435. static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
  2436. static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
  2437. static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
  2438. static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
  2439. static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
  2440. static struct attribute *ctc_attr[] = {
  2441. &dev_attr_protocol.attr,
  2442. &dev_attr_type.attr,
  2443. &dev_attr_buffer.attr,
  2444. NULL,
  2445. };
  2446. static struct attribute_group ctc_attr_group = {
  2447. .attrs = ctc_attr,
  2448. };
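/**
* Create the loglevel and stats attributes. If creating the stats
* attribute fails, the loglevel attribute is removed again;
* ctc_remove_attributes() undoes the whole setup.
*/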
  2449. static int
  2450. ctc_add_attributes(struct device *dev)
  2451. {
  2452. int rc;
  2453. rc = device_create_file(dev, &dev_attr_loglevel);
  2454. if (rc)
  2455. goto out;
  2456. rc = device_create_file(dev, &dev_attr_stats);
  2457. if (!rc)
  2458. goto out;
  2459. device_remove_file(dev, &dev_attr_loglevel);
  2460. out:
  2461. return rc;
  2462. }
  2463. static void
  2464. ctc_remove_attributes(struct device *dev)
  2465. {
  2466. device_remove_file(dev, &dev_attr_stats);
  2467. device_remove_file(dev, &dev_attr_loglevel);
  2468. }
  2469. static int
  2470. ctc_add_files(struct device *dev)
  2471. {
  2472. pr_debug("%s() called\n", __FUNCTION__);
  2473. return sysfs_create_group(&dev->kobj, &ctc_attr_group);
  2474. }
  2475. static void
  2476. ctc_remove_files(struct device *dev)
  2477. {
  2478. pr_debug("%s() called\n", __FUNCTION__);
  2479. sysfs_remove_group(&dev->kobj, &ctc_attr_group);
  2480. }
  2481. /**
  2482. * Add ctc specific attributes.
  2483. * Add ctc private data.
  2484. *
  2485. * @param cgdev pointer to ccwgroup_device just added
  2486. *
  2487. * @returns 0 on success, !0 on failure.
  2488. */
  2489. static int
  2490. ctc_probe_device(struct ccwgroup_device *cgdev)
  2491. {
  2492. struct ctc_priv *priv;
  2493. int rc;
  2494. char buffer[16];
  2495. pr_debug("%s() called\n", __FUNCTION__);
  2496. DBF_TEXT(setup, 3, __FUNCTION__);
  2497. if (!get_device(&cgdev->dev))
  2498. return -ENODEV;
  2499. priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
  2500. if (!priv) {
  2501. ctc_pr_err("%s: Out of memory\n", __func__);
  2502. put_device(&cgdev->dev);
  2503. return -ENOMEM;
  2504. }
  2505. memset(priv, 0, sizeof (struct ctc_priv));
  2506. rc = ctc_add_files(&cgdev->dev);
  2507. if (rc) {
  2508. kfree(priv);
  2509. put_device(&cgdev->dev);
  2510. return rc;
  2511. }
  2512. priv->buffer_size = CTC_BUFSIZE_DEFAULT;
  2513. cgdev->cdev[0]->handler = ctc_irq_handler;
  2514. cgdev->cdev[1]->handler = ctc_irq_handler;
  2515. cgdev->dev.driver_data = priv;
  2516. sprintf(buffer, "%p", priv);
  2517. DBF_TEXT(data, 3, buffer);
  2518. sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
  2519. DBF_TEXT(data, 3, buffer);
  2520. sprintf(buffer, "%p", &channels);
  2521. DBF_TEXT(data, 3, buffer);
  2522. sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
  2523. DBF_TEXT(data, 3, buffer);
  2524. return 0;
  2525. }
  2526. /**
2527. * Initialize all fields of the net device except the name and the
2528. * channel structs.
  2529. */
  2530. static struct net_device *
  2531. ctc_init_netdevice(struct net_device * dev, int alloc_device,
  2532. struct ctc_priv *privptr)
  2533. {
  2534. if (!privptr)
  2535. return NULL;
  2536. DBF_TEXT(setup, 3, __FUNCTION__);
  2537. if (alloc_device) {
  2538. dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
  2539. if (!dev)
  2540. return NULL;
  2541. memset(dev, 0, sizeof (struct net_device));
  2542. }
  2543. dev->priv = privptr;
  2544. privptr->fsm = init_fsm("ctcdev", dev_state_names,
  2545. dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
  2546. dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
  2547. if (privptr->fsm == NULL) {
  2548. if (alloc_device)
  2549. kfree(dev);
  2550. return NULL;
  2551. }
  2552. fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
  2553. fsm_settimer(privptr->fsm, &privptr->restart_timer);
  2554. if (dev->mtu == 0)
  2555. dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
  2556. dev->hard_start_xmit = ctc_tx;
  2557. dev->open = ctc_open;
  2558. dev->stop = ctc_close;
  2559. dev->get_stats = ctc_stats;
  2560. dev->change_mtu = ctc_change_mtu;
  2561. dev->hard_header_len = LL_HEADER_LENGTH + 2;
  2562. dev->addr_len = 0;
  2563. dev->type = ARPHRD_SLIP;
  2564. dev->tx_queue_len = 100;
  2565. dev->flags = IFF_POINTOPOINT | IFF_NOARP;
  2566. SET_MODULE_OWNER(dev);
  2567. return dev;
  2568. }
  2569. /**
  2570. *
  2571. * Setup an interface.
  2572. *
  2573. * @param cgdev Device to be setup.
  2574. *
  2575. * @returns 0 on success, !0 on failure.
  2576. */
  2577. static int
  2578. ctc_new_device(struct ccwgroup_device *cgdev)
  2579. {
  2580. char read_id[CTC_ID_SIZE];
  2581. char write_id[CTC_ID_SIZE];
  2582. int direction;
  2583. enum channel_types type;
  2584. struct ctc_priv *privptr;
  2585. struct net_device *dev;
  2586. int ret;
  2587. char buffer[16];
  2588. pr_debug("%s() called\n", __FUNCTION__);
  2589. DBF_TEXT(setup, 3, __FUNCTION__);
  2590. privptr = cgdev->dev.driver_data;
  2591. if (!privptr)
  2592. return -ENODEV;
  2593. sprintf(buffer, "%d", privptr->buffer_size);
  2594. DBF_TEXT(setup, 3, buffer);
  2595. type = get_channel_type(&cgdev->cdev[0]->id);
  2596. snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
  2597. snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
  2598. if (add_channel(cgdev->cdev[0], type))
  2599. return -ENOMEM;
  2600. if (add_channel(cgdev->cdev[1], type))
  2601. return -ENOMEM;
  2602. ret = ccw_device_set_online(cgdev->cdev[0]);
  2603. if (ret != 0) {
  2604. printk(KERN_WARNING
  2605. "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
  2606. }
  2607. ret = ccw_device_set_online(cgdev->cdev[1]);
  2608. if (ret != 0) {
  2609. printk(KERN_WARNING
  2610. "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
  2611. }
  2612. dev = ctc_init_netdevice(NULL, 1, privptr);
  2613. if (!dev) {
  2614. ctc_pr_warn("ctc_init_netdevice failed\n");
  2615. goto out;
  2616. }
  2617. strlcpy(dev->name, "ctc%d", IFNAMSIZ);
  2618. for (direction = READ; direction <= WRITE; direction++) {
  2619. privptr->channel[direction] =
  2620. channel_get(type, direction == READ ? read_id : write_id,
  2621. direction);
  2622. if (privptr->channel[direction] == NULL) {
  2623. if (direction == WRITE)
  2624. channel_free(privptr->channel[READ]);
  2625. ctc_free_netdevice(dev, 1);
  2626. goto out;
  2627. }
  2628. privptr->channel[direction]->netdev = dev;
  2629. privptr->channel[direction]->protocol = privptr->protocol;
  2630. privptr->channel[direction]->max_bufsize = privptr->buffer_size;
  2631. }
  2632. /* sysfs magic */
  2633. SET_NETDEV_DEV(dev, &cgdev->dev);
  2634. if (ctc_netdev_register(dev) != 0) {
  2635. ctc_free_netdevice(dev, 1);
  2636. goto out;
  2637. }
  2638. if (ctc_add_attributes(&cgdev->dev)) {
  2639. ctc_netdev_unregister(dev);
  2640. dev->priv = NULL;
  2641. ctc_free_netdevice(dev, 1);
  2642. goto out;
  2643. }
  2644. strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
  2645. print_banner();
  2646. ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
  2647. dev->name, privptr->channel[READ]->id,
  2648. privptr->channel[WRITE]->id, privptr->protocol);
  2649. return 0;
  2650. out:
  2651. ccw_device_set_offline(cgdev->cdev[1]);
  2652. ccw_device_set_offline(cgdev->cdev[0]);
  2653. return -ENODEV;
  2654. }
  2655. /**
  2656. * Shutdown an interface.
  2657. *
  2658. * @param cgdev Device to be shut down.
  2659. *
  2660. * @returns 0 on success, !0 on failure.
  2661. */
  2662. static int
  2663. ctc_shutdown_device(struct ccwgroup_device *cgdev)
  2664. {
  2665. struct ctc_priv *priv;
  2666. struct net_device *ndev;
  2667. DBF_TEXT(setup, 3, __FUNCTION__);
  2668. pr_debug("%s() called\n", __FUNCTION__);
  2669. priv = cgdev->dev.driver_data;
  2670. ndev = NULL;
  2671. if (!priv)
  2672. return -ENODEV;
  2673. if (priv->channel[READ]) {
  2674. ndev = priv->channel[READ]->netdev;
  2675. /* Close the device */
  2676. ctc_close(ndev);
2677. ndev->flags &= ~IFF_RUNNING;
  2678. ctc_remove_attributes(&cgdev->dev);
  2679. channel_free(priv->channel[READ]);
  2680. }
  2681. if (priv->channel[WRITE])
  2682. channel_free(priv->channel[WRITE]);
  2683. if (ndev) {
  2684. ctc_netdev_unregister(ndev);
  2685. ndev->priv = NULL;
  2686. ctc_free_netdevice(ndev, 1);
  2687. }
  2688. if (priv->fsm)
  2689. kfree_fsm(priv->fsm);
  2690. ccw_device_set_offline(cgdev->cdev[1]);
  2691. ccw_device_set_offline(cgdev->cdev[0]);
  2692. if (priv->channel[READ])
  2693. channel_remove(priv->channel[READ]);
  2694. if (priv->channel[WRITE])
  2695. channel_remove(priv->channel[WRITE]);
  2696. priv->channel[READ] = priv->channel[WRITE] = NULL;
  2697. return 0;
  2698. }
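/**
* Remove a ctc device: shut it down if it is still online, remove its
* sysfs files and free the private data.
*
* @param cgdev Device to be removed.
*/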
  2699. static void
  2700. ctc_remove_device(struct ccwgroup_device *cgdev)
  2701. {
  2702. struct ctc_priv *priv;
  2703. pr_debug("%s() called\n", __FUNCTION__);
  2704. DBF_TEXT(setup, 3, __FUNCTION__);
  2705. priv = cgdev->dev.driver_data;
  2706. if (!priv)
  2707. return;
  2708. if (cgdev->state == CCWGROUP_ONLINE)
  2709. ctc_shutdown_device(cgdev);
  2710. ctc_remove_files(&cgdev->dev);
  2711. cgdev->dev.driver_data = NULL;
  2712. kfree(priv);
  2713. put_device(&cgdev->dev);
  2714. }
  2715. static struct ccwgroup_driver ctc_group_driver = {
  2716. .owner = THIS_MODULE,
  2717. .name = "ctc",
  2718. .max_slaves = 2,
  2719. .driver_id = 0xC3E3C3,
  2720. .probe = ctc_probe_device,
  2721. .remove = ctc_remove_device,
  2722. .set_online = ctc_new_device,
  2723. .set_offline = ctc_shutdown_device,
  2724. };
  2725. /**
  2726. * Module related routines
  2727. *****************************************************************************/
  2728. /**
2729. * Prepare to be unloaded. Free IRQs and release all resources.
2730. * This is called just before this module is unloaded. It is
2731. * <em>not</em> called if the usage count is !0, so we don't need to check
  2732. * for that.
  2733. */
  2734. static void __exit
  2735. ctc_exit(void)
  2736. {
  2737. DBF_TEXT(setup, 3, __FUNCTION__);
  2738. unregister_cu3088_discipline(&ctc_group_driver);
  2739. ctc_unregister_dbf_views();
  2740. ctc_pr_info("CTC driver unloaded\n");
  2741. }
  2742. /**
  2743. * Initialize module.
  2744. * This is called just after the module is loaded.
  2745. *
  2746. * @return 0 on success, !0 on error.
  2747. */
  2748. static int __init
  2749. ctc_init(void)
  2750. {
  2751. int ret = 0;
  2752. loglevel = CTC_LOGLEVEL_DEFAULT;
  2753. DBF_TEXT(setup, 3, __FUNCTION__);
  2754. print_banner();
  2755. ret = ctc_register_dbf_views();
2756. if (ret) {
  2757. ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
  2758. return ret;
  2759. }
  2760. ret = register_cu3088_discipline(&ctc_group_driver);
  2761. if (ret) {
  2762. ctc_unregister_dbf_views();
  2763. }
  2764. return ret;
  2765. }
  2766. module_init(ctc_init);
  2767. module_exit(ctc_exit);
  2768. /* --- This is the END my friend --- */