lec.c

  1. /*
  2. * lec.c: Lan Emulation driver
  3. *
  4. * Marko Kiiskila <mkiiskila@yahoo.com>
  5. */
  6. #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  7. #include <linux/slab.h>
  8. #include <linux/kernel.h>
  9. #include <linux/bitops.h>
  10. #include <linux/capability.h>
  11. /* We are ethernet device */
  12. #include <linux/if_ether.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/etherdevice.h>
  15. #include <net/sock.h>
  16. #include <linux/skbuff.h>
  17. #include <linux/ip.h>
  18. #include <asm/byteorder.h>
  19. #include <linux/uaccess.h>
  20. #include <net/arp.h>
  21. #include <net/dst.h>
  22. #include <linux/proc_fs.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/seq_file.h>
  25. /* TokenRing if needed */
  26. #ifdef CONFIG_TR
  27. #include <linux/trdevice.h>
  28. #endif
  29. /* And atm device */
  30. #include <linux/atmdev.h>
  31. #include <linux/atmlec.h>
  32. /* Proxy LEC knows about bridging */
  33. #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
  34. #include "../bridge/br_private.h"
  35. static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
  36. #endif
  37. /* Modular too */
  38. #include <linux/module.h>
  39. #include <linux/init.h>
  40. #include "lec.h"
  41. #include "lec_arpc.h"
  42. #include "resources.h"
  43. #define DUMP_PACKETS 0 /*
  44. * 0 = None,
45. * 1 = first 30 bytes
  46. * 2 = Whole packet
  47. */
  48. #define LEC_UNRES_QUE_LEN 8 /*
  49. * number of tx packets to queue for a
  50. * single destination while waiting for SVC
  51. */
  52. static int lec_open(struct net_device *dev);
  53. static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
  54. struct net_device *dev);
  55. static int lec_close(struct net_device *dev);
  56. static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
  57. const unsigned char *mac_addr);
  58. static int lec_arp_remove(struct lec_priv *priv,
  59. struct lec_arp_table *to_remove);
  60. /* LANE2 functions */
  61. static void lane2_associate_ind(struct net_device *dev, const u8 *mac_address,
  62. const u8 *tlvs, u32 sizeoftlvs);
  63. static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
  64. u8 **tlvs, u32 *sizeoftlvs);
  65. static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
  66. const u8 *tlvs, u32 sizeoftlvs);
  67. static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
  68. unsigned long permanent);
  69. static void lec_arp_check_empties(struct lec_priv *priv,
  70. struct atm_vcc *vcc, struct sk_buff *skb);
  71. static void lec_arp_destroy(struct lec_priv *priv);
  72. static void lec_arp_init(struct lec_priv *priv);
  73. static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
  74. const unsigned char *mac_to_find,
  75. int is_rdesc,
  76. struct lec_arp_table **ret_entry);
  77. static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
  78. const unsigned char *atm_addr,
  79. unsigned long remoteflag,
  80. unsigned int targetless_le_arp);
  81. static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
  82. static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
  83. static void lec_set_flush_tran_id(struct lec_priv *priv,
  84. const unsigned char *atm_addr,
  85. unsigned long tran_id);
  86. static void lec_vcc_added(struct lec_priv *priv,
  87. const struct atmlec_ioc *ioc_data,
  88. struct atm_vcc *vcc,
  89. void (*old_push)(struct atm_vcc *vcc,
  90. struct sk_buff *skb));
  91. static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
  92. /* must be done under lec_arp_lock */
  93. static inline void lec_arp_hold(struct lec_arp_table *entry)
  94. {
  95. atomic_inc(&entry->usage);
  96. }
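/*
 * Drop a reference to an ARP table entry; the entry is freed when the
 * last reference goes away.
 */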
  97. static inline void lec_arp_put(struct lec_arp_table *entry)
  98. {
  99. if (atomic_dec_and_test(&entry->usage))
  100. kfree(entry);
  101. }
  102. static struct lane2_ops lane2_ops = {
  103. lane2_resolve, /* resolve, spec 3.1.3 */
  104. lane2_associate_req, /* associate_req, spec 3.1.4 */
  105. NULL /* associate indicator, spec 3.1.5 */
  106. };
  107. static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
  108. /* Device structures */
  109. static struct net_device *dev_lec[MAX_LEC_ITF];
  110. #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
  111. static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
  112. {
  113. struct ethhdr *eth;
  114. char *buff;
  115. struct lec_priv *priv;
  116. /*
  117. * Check if this is a BPDU. If so, ask zeppelin to send
  118. * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit
  119. * as the Config BPDU has
  120. */
  121. eth = (struct ethhdr *)skb->data;
  122. buff = skb->data + skb->dev->hard_header_len;
  123. if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) {
  124. struct sock *sk;
  125. struct sk_buff *skb2;
  126. struct atmlec_msg *mesg;
  127. skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
  128. if (skb2 == NULL)
  129. return;
  130. skb2->len = sizeof(struct atmlec_msg);
  131. mesg = (struct atmlec_msg *)skb2->data;
  132. mesg->type = l_topology_change;
  133. buff += 4;
  134. mesg->content.normal.flag = *buff & 0x01;
  135. /* 0x01 is topology change */
  136. priv = netdev_priv(dev);
  137. atm_force_charge(priv->lecd, skb2->truesize);
  138. sk = sk_atm(priv->lecd);
  139. skb_queue_tail(&sk->sk_receive_queue, skb2);
  140. sk->sk_data_ready(sk, skb2->len);
  141. }
  142. return;
  143. }
  144. #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
  145. /*
  146. * Modelled after tr_type_trans
  147. * All multicast and ARE or STE frames go to BUS.
  148. * Non source routed frames go by destination address.
  149. * Last hop source routed frames go by destination address.
  150. * Not last hop source routed frames go by _next_ route descriptor.
  151. * Returns pointer to destination MAC address or fills in rdesc
  152. * and returns NULL.
  153. */
  154. #ifdef CONFIG_TR
  155. static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
  156. {
  157. struct trh_hdr *trh;
  158. unsigned int riflen, num_rdsc;
  159. trh = (struct trh_hdr *)packet;
  160. if (trh->daddr[0] & (uint8_t) 0x80)
  161. return bus_mac; /* multicast */
  162. if (trh->saddr[0] & TR_RII) {
  163. riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
  164. if ((ntohs(trh->rcf) >> 13) != 0)
  165. return bus_mac; /* ARE or STE */
  166. } else
  167. return trh->daddr; /* not source routed */
  168. if (riflen < 6)
  169. return trh->daddr; /* last hop, source routed */
  170. /* riflen is 6 or more, packet has more than one route descriptor */
  171. num_rdsc = (riflen / 2) - 1;
  172. memset(rdesc, 0, ETH_ALEN);
  173. /* offset 4 comes from LAN destination field in LE control frames */
  174. if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
  175. memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
  176. else {
  177. memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
  178. rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
  179. }
  180. return NULL;
  181. }
  182. #endif /* CONFIG_TR */
  183. /*
  184. * Open/initialize the netdevice. This is called (in the current kernel)
  185. * sometime after booting when the 'ifconfig' program is run.
  186. *
  187. * This routine should set everything up anew at each open, even
  188. * registers that "should" only need to be set once at boot, so that
189. there is a non-reboot way to recover if something goes wrong.
  190. */
  191. static int lec_open(struct net_device *dev)
  192. {
  193. netif_start_queue(dev);
  194. memset(&dev->stats, 0, sizeof(struct net_device_stats));
  195. return 0;
  196. }
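/*
 * Hand one skb to the ATM layer on the given VCC, charging its truesize
 * to the socket send buffer and updating the device TX statistics.
 */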
  197. static void
  198. lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
  199. {
  200. struct net_device *dev = skb->dev;
  201. ATM_SKB(skb)->vcc = vcc;
  202. ATM_SKB(skb)->atm_options = vcc->atm_options;
  203. atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
  204. if (vcc->send(vcc, skb) < 0) {
  205. dev->stats.tx_dropped++;
  206. return;
  207. }
  208. dev->stats.tx_packets++;
  209. dev->stats.tx_bytes += skb->len;
  210. }
  211. static void lec_tx_timeout(struct net_device *dev)
  212. {
  213. pr_info("%s\n", dev->name);
  214. dev->trans_start = jiffies;
  215. netif_wake_queue(dev);
  216. }
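/*
 * Transmit path: prepend the 2-byte LEC ID, pad the frame up to the
 * minimum LE frame size, resolve the destination MAC address (or Token
 * Ring route descriptor) to a data direct VCC through the LE_ARP cache,
 * and either send the frame or queue it on the entry while the SVC is
 * still being set up.
 */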
  217. static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
  218. struct net_device *dev)
  219. {
  220. struct sk_buff *skb2;
  221. struct lec_priv *priv = netdev_priv(dev);
  222. struct lecdatahdr_8023 *lec_h;
  223. struct atm_vcc *vcc;
  224. struct lec_arp_table *entry;
  225. unsigned char *dst;
  226. int min_frame_size;
  227. #ifdef CONFIG_TR
  228. unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
  229. #endif
  230. int is_rdesc;
  231. pr_debug("called\n");
  232. if (!priv->lecd) {
  233. pr_info("%s:No lecd attached\n", dev->name);
  234. dev->stats.tx_errors++;
  235. netif_stop_queue(dev);
  236. kfree_skb(skb);
  237. return NETDEV_TX_OK;
  238. }
  239. pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
  240. (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
  241. (long)skb_end_pointer(skb));
  242. #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
  243. if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
  244. lec_handle_bridge(skb, dev);
  245. #endif
  246. /* Make sure we have room for lec_id */
  247. if (skb_headroom(skb) < 2) {
  248. pr_debug("reallocating skb\n");
  249. skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
  250. kfree_skb(skb);
  251. if (skb2 == NULL)
  252. return NETDEV_TX_OK;
  253. skb = skb2;
  254. }
  255. skb_push(skb, 2);
256. /* Put the LE header in place; works for TokenRing too */
  257. lec_h = (struct lecdatahdr_8023 *)skb->data;
  258. lec_h->le_header = htons(priv->lecid);
  259. #ifdef CONFIG_TR
  260. /*
  261. * Ugly. Use this to realign Token Ring packets for
  262. * e.g. PCA-200E driver.
  263. */
  264. if (priv->is_trdev) {
  265. skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
  266. kfree_skb(skb);
  267. if (skb2 == NULL)
  268. return NETDEV_TX_OK;
  269. skb = skb2;
  270. }
  271. #endif
  272. #if DUMP_PACKETS >= 2
  273. #define MAX_DUMP_SKB 99
  274. #elif DUMP_PACKETS >= 1
  275. #define MAX_DUMP_SKB 30
  276. #endif
  277. #if DUMP_PACKETS >= 1
  278. printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n",
  279. dev->name, skb->len, priv->lecid);
  280. print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
  281. skb->data, min(skb->len, MAX_DUMP_SKB), true);
  282. #endif /* DUMP_PACKETS >= 1 */
  283. /* Minimum ethernet-frame size */
  284. #ifdef CONFIG_TR
  285. if (priv->is_trdev)
  286. min_frame_size = LEC_MINIMUM_8025_SIZE;
  287. else
  288. #endif
  289. min_frame_size = LEC_MINIMUM_8023_SIZE;
  290. if (skb->len < min_frame_size) {
  291. if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
  292. skb2 = skb_copy_expand(skb, 0,
  293. min_frame_size - skb->truesize,
  294. GFP_ATOMIC);
  295. dev_kfree_skb(skb);
  296. if (skb2 == NULL) {
  297. dev->stats.tx_dropped++;
  298. return NETDEV_TX_OK;
  299. }
  300. skb = skb2;
  301. }
  302. skb_put(skb, min_frame_size - skb->len);
  303. }
  304. /* Send to right vcc */
  305. is_rdesc = 0;
  306. dst = lec_h->h_dest;
  307. #ifdef CONFIG_TR
  308. if (priv->is_trdev) {
  309. dst = get_tr_dst(skb->data + 2, rdesc);
  310. if (dst == NULL) {
  311. dst = rdesc;
  312. is_rdesc = 1;
  313. }
  314. }
  315. #endif
  316. entry = NULL;
  317. vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
  318. pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
  319. dev->name, vcc, vcc ? vcc->flags : 0, entry);
  320. if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
  321. if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
  322. pr_debug("%s:queuing packet, MAC address %pM\n",
  323. dev->name, lec_h->h_dest);
  324. skb_queue_tail(&entry->tx_wait, skb);
  325. } else {
  326. pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n",
  327. dev->name, lec_h->h_dest);
  328. dev->stats.tx_dropped++;
  329. dev_kfree_skb(skb);
  330. }
  331. goto out;
  332. }
  333. #if DUMP_PACKETS > 0
  334. printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n",
  335. dev->name, vcc->vpi, vcc->vci);
  336. #endif /* DUMP_PACKETS > 0 */
  337. while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
  338. pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest);
  339. lec_send(vcc, skb2);
  340. }
  341. lec_send(vcc, skb);
  342. if (!atm_may_send(vcc, 0)) {
  343. struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
  344. vpriv->xoff = 1;
  345. netif_stop_queue(dev);
  346. /*
  347. * vcc->pop() might have occurred in between, making
348. * the vcc usable again. Since xmit is serialized,
  349. * this is the only situation we have to re-test.
  350. */
  351. if (atm_may_send(vcc, 0))
  352. netif_wake_queue(dev);
  353. }
  354. out:
  355. if (entry)
  356. lec_arp_put(entry);
  357. dev->trans_start = jiffies;
  358. return NETDEV_TX_OK;
  359. }
  360. /* The inverse routine to net_open(). */
  361. static int lec_close(struct net_device *dev)
  362. {
  363. netif_stop_queue(dev);
  364. return 0;
  365. }
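/*
 * Control messages from the LANE daemon (zeppelin) arrive here over the
 * control VCC; each atmlec_msg updates the LE_ARP cache or the LEC
 * configuration accordingly.
 */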
  366. static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
  367. {
  368. unsigned long flags;
  369. struct net_device *dev = (struct net_device *)vcc->proto_data;
  370. struct lec_priv *priv = netdev_priv(dev);
  371. struct atmlec_msg *mesg;
  372. struct lec_arp_table *entry;
  373. int i;
  374. char *tmp; /* FIXME */
  375. atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
  376. mesg = (struct atmlec_msg *)skb->data;
  377. tmp = skb->data;
  378. tmp += sizeof(struct atmlec_msg);
  379. pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
  380. switch (mesg->type) {
  381. case l_set_mac_addr:
  382. for (i = 0; i < 6; i++)
  383. dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
  384. break;
  385. case l_del_mac_addr:
  386. for (i = 0; i < 6; i++)
  387. dev->dev_addr[i] = 0;
  388. break;
  389. case l_addr_delete:
  390. lec_addr_delete(priv, mesg->content.normal.atm_addr,
  391. mesg->content.normal.flag);
  392. break;
  393. case l_topology_change:
  394. priv->topology_change = mesg->content.normal.flag;
  395. break;
  396. case l_flush_complete:
  397. lec_flush_complete(priv, mesg->content.normal.flag);
  398. break;
  399. case l_narp_req: /* LANE2: see 7.1.35 in the lane2 spec */
  400. spin_lock_irqsave(&priv->lec_arp_lock, flags);
  401. entry = lec_arp_find(priv, mesg->content.normal.mac_addr);
  402. lec_arp_remove(priv, entry);
  403. spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
  404. if (mesg->content.normal.no_source_le_narp)
  405. break;
  406. /* FALL THROUGH */
  407. case l_arp_update:
  408. lec_arp_update(priv, mesg->content.normal.mac_addr,
  409. mesg->content.normal.atm_addr,
  410. mesg->content.normal.flag,
  411. mesg->content.normal.targetless_le_arp);
  412. pr_debug("in l_arp_update\n");
  413. if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */
  414. pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
  415. mesg->sizeoftlvs);
  416. lane2_associate_ind(dev, mesg->content.normal.mac_addr,
  417. tmp, mesg->sizeoftlvs);
  418. }
  419. break;
  420. case l_config:
  421. priv->maximum_unknown_frame_count =
  422. mesg->content.config.maximum_unknown_frame_count;
  423. priv->max_unknown_frame_time =
  424. (mesg->content.config.max_unknown_frame_time * HZ);
  425. priv->max_retry_count = mesg->content.config.max_retry_count;
  426. priv->aging_time = (mesg->content.config.aging_time * HZ);
  427. priv->forward_delay_time =
  428. (mesg->content.config.forward_delay_time * HZ);
  429. priv->arp_response_time =
  430. (mesg->content.config.arp_response_time * HZ);
  431. priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
  432. priv->path_switching_delay =
  433. (mesg->content.config.path_switching_delay * HZ);
  434. priv->lane_version = mesg->content.config.lane_version;
  435. /* LANE2 */
  436. priv->lane2_ops = NULL;
  437. if (priv->lane_version > 1)
  438. priv->lane2_ops = &lane2_ops;
  439. if (dev_set_mtu(dev, mesg->content.config.mtu))
  440. pr_info("%s: change_mtu to %d failed\n",
  441. dev->name, mesg->content.config.mtu);
  442. priv->is_proxy = mesg->content.config.is_proxy;
  443. break;
  444. case l_flush_tran_id:
  445. lec_set_flush_tran_id(priv, mesg->content.normal.atm_addr,
  446. mesg->content.normal.flag);
  447. break;
  448. case l_set_lecid:
  449. priv->lecid =
  450. (unsigned short)(0xffff & mesg->content.normal.flag);
  451. break;
  452. case l_should_bridge:
  453. #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
  454. {
  455. pr_debug("%s: bridge zeppelin asks about %pM\n",
  456. dev->name, mesg->content.proxy.mac_addr);
  457. if (br_fdb_test_addr_hook == NULL)
  458. break;
  459. if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
  460. /* hit from bridge table, send LE_ARP_RESPONSE */
  461. struct sk_buff *skb2;
  462. struct sock *sk;
  463. pr_debug("%s: entry found, responding to zeppelin\n",
  464. dev->name);
  465. skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
  466. if (skb2 == NULL)
  467. break;
  468. skb2->len = sizeof(struct atmlec_msg);
  469. skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
  470. atm_force_charge(priv->lecd, skb2->truesize);
  471. sk = sk_atm(priv->lecd);
  472. skb_queue_tail(&sk->sk_receive_queue, skb2);
  473. sk->sk_data_ready(sk, skb2->len);
  474. }
  475. }
  476. #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
  477. break;
  478. default:
  479. pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
  480. dev_kfree_skb(skb);
  481. return -EINVAL;
  482. }
  483. dev_kfree_skb(skb);
  484. return 0;
  485. }
  486. static void lec_atm_close(struct atm_vcc *vcc)
  487. {
  488. struct sk_buff *skb;
  489. struct net_device *dev = (struct net_device *)vcc->proto_data;
  490. struct lec_priv *priv = netdev_priv(dev);
  491. priv->lecd = NULL;
  492. /* Do something needful? */
  493. netif_stop_queue(dev);
  494. lec_arp_destroy(priv);
  495. if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
  496. pr_info("%s closing with messages pending\n", dev->name);
  497. while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
  498. atm_return(vcc, skb->truesize);
  499. dev_kfree_skb(skb);
  500. }
  501. pr_info("%s: Shut down!\n", dev->name);
  502. module_put(THIS_MODULE);
  503. }
  504. static struct atmdev_ops lecdev_ops = {
  505. .close = lec_atm_close,
  506. .send = lec_atm_send
  507. };
  508. static struct atm_dev lecatm_dev = {
  509. .ops = &lecdev_ops,
  510. .type = "lec",
  511. .number = 999, /* dummy device number */
  512. .lock = __SPIN_LOCK_UNLOCKED(lecatm_dev.lock)
  513. };
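/*
 * Build an atmlec_msg of the given type and queue it on the LANE
 * daemon's receive queue; optional TLV data follows as a second skb.
 */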
  514. /*
  515. * LANE2: new argument struct sk_buff *data contains
  516. * the LE_ARP based TLVs introduced in the LANE2 spec
  517. */
  518. static int
  519. send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
  520. const unsigned char *mac_addr, const unsigned char *atm_addr,
  521. struct sk_buff *data)
  522. {
  523. struct sock *sk;
  524. struct sk_buff *skb;
  525. struct atmlec_msg *mesg;
  526. if (!priv || !priv->lecd)
  527. return -1;
  528. skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
  529. if (!skb)
  530. return -1;
  531. skb->len = sizeof(struct atmlec_msg);
  532. mesg = (struct atmlec_msg *)skb->data;
  533. memset(mesg, 0, sizeof(struct atmlec_msg));
  534. mesg->type = type;
  535. if (data != NULL)
  536. mesg->sizeoftlvs = data->len;
  537. if (mac_addr)
  538. memcpy(&mesg->content.normal.mac_addr, mac_addr, ETH_ALEN);
  539. else
  540. mesg->content.normal.targetless_le_arp = 1;
  541. if (atm_addr)
  542. memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN);
  543. atm_force_charge(priv->lecd, skb->truesize);
  544. sk = sk_atm(priv->lecd);
  545. skb_queue_tail(&sk->sk_receive_queue, skb);
  546. sk->sk_data_ready(sk, skb->len);
  547. if (data != NULL) {
  548. pr_debug("about to send %d bytes of data\n", data->len);
  549. atm_force_charge(priv->lecd, data->truesize);
  550. skb_queue_tail(&sk->sk_receive_queue, data);
  551. sk->sk_data_ready(sk, skb->len);
  552. }
  553. return 0;
  554. }
  555. /* shamelessly stolen from drivers/net/net_init.c */
  556. static int lec_change_mtu(struct net_device *dev, int new_mtu)
  557. {
  558. if ((new_mtu < 68) || (new_mtu > 18190))
  559. return -EINVAL;
  560. dev->mtu = new_mtu;
  561. return 0;
  562. }
  563. static void lec_set_multicast_list(struct net_device *dev)
  564. {
  565. /*
  566. * by default, all multicast frames arrive over the bus.
  567. * eventually support selective multicast service
  568. */
  569. return;
  570. }
  571. static const struct net_device_ops lec_netdev_ops = {
  572. .ndo_open = lec_open,
  573. .ndo_stop = lec_close,
  574. .ndo_start_xmit = lec_start_xmit,
  575. .ndo_change_mtu = lec_change_mtu,
  576. .ndo_tx_timeout = lec_tx_timeout,
  577. .ndo_set_multicast_list = lec_set_multicast_list,
  578. };
  579. static const unsigned char lec_ctrl_magic[] = {
  580. 0xff,
  581. 0x00,
  582. 0x01,
  583. 0x01
  584. };
  585. #define LEC_DATA_DIRECT_8023 2
  586. #define LEC_DATA_DIRECT_8025 3
  587. static int lec_is_data_direct(struct atm_vcc *vcc)
  588. {
  589. return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) ||
  590. (vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025));
  591. }
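/*
 * Receive path for an attached VCC: control frames (matching
 * lec_ctrl_magic) are queued to the daemon, data frames have the LEC ID
 * stripped and are handed to the stack with netif_rx().
 */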
  592. static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
  593. {
  594. unsigned long flags;
  595. struct net_device *dev = (struct net_device *)vcc->proto_data;
  596. struct lec_priv *priv = netdev_priv(dev);
  597. #if DUMP_PACKETS > 0
  598. printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
  599. dev->name, vcc->vpi, vcc->vci);
  600. #endif
  601. if (!skb) {
  602. pr_debug("%s: null skb\n", dev->name);
  603. lec_vcc_close(priv, vcc);
  604. return;
  605. }
  606. #if DUMP_PACKETS >= 2
  607. #define MAX_SKB_DUMP 99
  608. #elif DUMP_PACKETS >= 1
  609. #define MAX_SKB_DUMP 30
  610. #endif
  611. #if DUMP_PACKETS > 0
  612. printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
  613. dev->name, skb->len, priv->lecid);
  614. print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
  615. skb->data, min(MAX_SKB_DUMP, skb->len), true);
  616. #endif /* DUMP_PACKETS > 0 */
  617. if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
  618. /* Control frame, to daemon */
  619. struct sock *sk = sk_atm(vcc);
  620. pr_debug("%s: To daemon\n", dev->name);
  621. skb_queue_tail(&sk->sk_receive_queue, skb);
  622. sk->sk_data_ready(sk, skb->len);
  623. } else { /* Data frame, queue to protocol handlers */
  624. struct lec_arp_table *entry;
  625. unsigned char *src, *dst;
  626. atm_return(vcc, skb->truesize);
  627. if (*(__be16 *) skb->data == htons(priv->lecid) ||
  628. !priv->lecd || !(dev->flags & IFF_UP)) {
  629. /*
630. * Probably looping back, or lecd is missing or
631. * has gone down
  632. */
  633. pr_debug("Ignoring frame...\n");
  634. dev_kfree_skb(skb);
  635. return;
  636. }
  637. #ifdef CONFIG_TR
  638. if (priv->is_trdev)
  639. dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest;
  640. else
  641. #endif
  642. dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
  643. /*
  644. * If this is a Data Direct VCC, and the VCC does not match
  645. * the LE_ARP cache entry, delete the LE_ARP cache entry.
  646. */
  647. spin_lock_irqsave(&priv->lec_arp_lock, flags);
  648. if (lec_is_data_direct(vcc)) {
  649. #ifdef CONFIG_TR
  650. if (priv->is_trdev)
  651. src =
  652. ((struct lecdatahdr_8025 *)skb->data)->
  653. h_source;
  654. else
  655. #endif
  656. src =
  657. ((struct lecdatahdr_8023 *)skb->data)->
  658. h_source;
  659. entry = lec_arp_find(priv, src);
  660. if (entry && entry->vcc != vcc) {
  661. lec_arp_remove(priv, entry);
  662. lec_arp_put(entry);
  663. }
  664. }
  665. spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
  666. if (!(dst[0] & 0x01) && /* Never filter Multi/Broadcast */
  667. !priv->is_proxy && /* Proxy wants all the packets */
  668. memcmp(dst, dev->dev_addr, dev->addr_len)) {
  669. dev_kfree_skb(skb);
  670. return;
  671. }
  672. if (!hlist_empty(&priv->lec_arp_empty_ones))
  673. lec_arp_check_empties(priv, vcc, skb);
  674. skb_pull(skb, 2); /* skip lec_id */
  675. #ifdef CONFIG_TR
  676. if (priv->is_trdev)
  677. skb->protocol = tr_type_trans(skb, dev);
  678. else
  679. #endif
  680. skb->protocol = eth_type_trans(skb, dev);
  681. dev->stats.rx_packets++;
  682. dev->stats.rx_bytes += skb->len;
  683. memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
  684. netif_rx(skb);
  685. }
  686. }
  687. static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
  688. {
  689. struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
  690. struct net_device *dev = skb->dev;
  691. if (vpriv == NULL) {
  692. pr_info("vpriv = NULL!?!?!?\n");
  693. return;
  694. }
  695. vpriv->old_pop(vcc, skb);
  696. if (vpriv->xoff && atm_may_send(vcc, 0)) {
  697. vpriv->xoff = 0;
  698. if (netif_running(dev) && netif_queue_stopped(dev))
  699. netif_wake_queue(dev);
  700. }
  701. }
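/*
 * ATMLEC_DATA: attach a data VCC opened by the daemon to a LEC
 * interface, interposing lec_pop/lec_push so that the driver sees
 * transmit completions and incoming frames.
 */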
  702. static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
  703. {
  704. struct lec_vcc_priv *vpriv;
  705. int bytes_left;
  706. struct atmlec_ioc ioc_data;
  707. /* Lecd must be up in this case */
  708. bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
  709. if (bytes_left != 0)
  710. pr_info("copy from user failed for %d bytes\n", bytes_left);
  711. if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
  712. !dev_lec[ioc_data.dev_num])
  713. return -EINVAL;
  714. vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
  715. if (!vpriv)
  716. return -ENOMEM;
  717. vpriv->xoff = 0;
  718. vpriv->old_pop = vcc->pop;
  719. vcc->user_back = vpriv;
  720. vcc->pop = lec_pop;
  721. lec_vcc_added(netdev_priv(dev_lec[ioc_data.dev_num]),
  722. &ioc_data, vcc, vcc->push);
  723. vcc->proto_data = dev_lec[ioc_data.dev_num];
  724. vcc->push = lec_push;
  725. return 0;
  726. }
  727. static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
  728. {
  729. if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
  730. return -EINVAL;
  731. vcc->proto_data = dev_lec[arg];
  732. return lec_mcast_make((struct lec_priv *)netdev_priv(dev_lec[arg]),
  733. vcc);
  734. }
  735. /* Initialize device. */
  736. static int lecd_attach(struct atm_vcc *vcc, int arg)
  737. {
  738. int i;
  739. struct lec_priv *priv;
  740. if (arg < 0)
  741. i = 0;
  742. else
  743. i = arg;
  744. #ifdef CONFIG_TR
  745. if (arg >= MAX_LEC_ITF)
  746. return -EINVAL;
  747. #else /* Reserve the top NUM_TR_DEVS for TR */
  748. if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS))
  749. return -EINVAL;
  750. #endif
  751. if (!dev_lec[i]) {
  752. int is_trdev, size;
  753. is_trdev = 0;
  754. if (i >= (MAX_LEC_ITF - NUM_TR_DEVS))
  755. is_trdev = 1;
  756. size = sizeof(struct lec_priv);
  757. #ifdef CONFIG_TR
  758. if (is_trdev)
  759. dev_lec[i] = alloc_trdev(size);
  760. else
  761. #endif
  762. dev_lec[i] = alloc_etherdev(size);
  763. if (!dev_lec[i])
  764. return -ENOMEM;
  765. dev_lec[i]->netdev_ops = &lec_netdev_ops;
  766. snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
  767. if (register_netdev(dev_lec[i])) {
  768. free_netdev(dev_lec[i]);
  769. return -EINVAL;
  770. }
  771. priv = netdev_priv(dev_lec[i]);
  772. priv->is_trdev = is_trdev;
  773. } else {
  774. priv = netdev_priv(dev_lec[i]);
  775. if (priv->lecd)
  776. return -EADDRINUSE;
  777. }
  778. lec_arp_init(priv);
  779. priv->itfnum = i; /* LANE2 addition */
  780. priv->lecd = vcc;
  781. vcc->dev = &lecatm_dev;
  782. vcc_insert_socket(sk_atm(vcc));
  783. vcc->proto_data = dev_lec[i];
  784. set_bit(ATM_VF_META, &vcc->flags);
  785. set_bit(ATM_VF_READY, &vcc->flags);
  786. /* Set default values to these variables */
  787. priv->maximum_unknown_frame_count = 1;
  788. priv->max_unknown_frame_time = (1 * HZ);
  789. priv->vcc_timeout_period = (1200 * HZ);
  790. priv->max_retry_count = 1;
  791. priv->aging_time = (300 * HZ);
  792. priv->forward_delay_time = (15 * HZ);
  793. priv->topology_change = 0;
  794. priv->arp_response_time = (1 * HZ);
  795. priv->flush_timeout = (4 * HZ);
  796. priv->path_switching_delay = (6 * HZ);
  797. if (dev_lec[i]->flags & IFF_UP)
  798. netif_start_queue(dev_lec[i]);
  799. __module_get(THIS_MODULE);
  800. return i;
  801. }
  802. #ifdef CONFIG_PROC_FS
  803. static const char *lec_arp_get_status_string(unsigned char status)
  804. {
  805. static const char *const lec_arp_status_string[] = {
  806. "ESI_UNKNOWN ",
  807. "ESI_ARP_PENDING ",
  808. "ESI_VC_PENDING ",
  809. "<Undefined> ",
  810. "ESI_FLUSH_PENDING ",
  811. "ESI_FORWARD_DIRECT"
  812. };
  813. if (status > ESI_FORWARD_DIRECT)
  814. status = 3; /* ESI_UNDEFINED */
  815. return lec_arp_status_string[status];
  816. }
  817. static void lec_info(struct seq_file *seq, struct lec_arp_table *entry)
  818. {
  819. int i;
  820. for (i = 0; i < ETH_ALEN; i++)
  821. seq_printf(seq, "%2.2x", entry->mac_addr[i] & 0xff);
  822. seq_printf(seq, " ");
  823. for (i = 0; i < ATM_ESA_LEN; i++)
  824. seq_printf(seq, "%2.2x", entry->atm_addr[i] & 0xff);
  825. seq_printf(seq, " %s %4.4x", lec_arp_get_status_string(entry->status),
  826. entry->flags & 0xffff);
  827. if (entry->vcc)
  828. seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci);
  829. else
  830. seq_printf(seq, " ");
  831. if (entry->recv_vcc) {
  832. seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi,
  833. entry->recv_vcc->vci);
  834. }
  835. seq_putc(seq, '\n');
  836. }
  837. struct lec_state {
  838. unsigned long flags;
  839. struct lec_priv *locked;
  840. struct hlist_node *node;
  841. struct net_device *dev;
  842. int itf;
  843. int arp_table;
  844. int misc_table;
  845. };
  846. static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
  847. loff_t *l)
  848. {
  849. struct hlist_node *e = state->node;
  850. struct lec_arp_table *tmp;
  851. if (!e)
  852. e = tbl->first;
  853. if (e == SEQ_START_TOKEN) {
  854. e = tbl->first;
  855. --*l;
  856. }
  857. hlist_for_each_entry_from(tmp, e, next) {
  858. if (--*l < 0)
  859. break;
  860. }
  861. state->node = e;
  862. return (*l < 0) ? state : NULL;
  863. }
  864. static void *lec_arp_walk(struct lec_state *state, loff_t *l,
  865. struct lec_priv *priv)
  866. {
  867. void *v = NULL;
  868. int p;
  869. for (p = state->arp_table; p < LEC_ARP_TABLE_SIZE; p++) {
  870. v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l);
  871. if (v)
  872. break;
  873. }
  874. state->arp_table = p;
  875. return v;
  876. }
  877. static void *lec_misc_walk(struct lec_state *state, loff_t *l,
  878. struct lec_priv *priv)
  879. {
  880. struct hlist_head *lec_misc_tables[] = {
  881. &priv->lec_arp_empty_ones,
  882. &priv->lec_no_forward,
  883. &priv->mcast_fwds
  884. };
  885. void *v = NULL;
  886. int q;
  887. for (q = state->misc_table; q < ARRAY_SIZE(lec_misc_tables); q++) {
  888. v = lec_tbl_walk(state, lec_misc_tables[q], l);
  889. if (v)
  890. break;
  891. }
  892. state->misc_table = q;
  893. return v;
  894. }
  895. static void *lec_priv_walk(struct lec_state *state, loff_t *l,
  896. struct lec_priv *priv)
  897. {
  898. if (!state->locked) {
  899. state->locked = priv;
  900. spin_lock_irqsave(&priv->lec_arp_lock, state->flags);
  901. }
  902. if (!lec_arp_walk(state, l, priv) && !lec_misc_walk(state, l, priv)) {
  903. spin_unlock_irqrestore(&priv->lec_arp_lock, state->flags);
  904. state->locked = NULL;
  905. /* Partial state reset for the next time we get called */
  906. state->arp_table = state->misc_table = 0;
  907. }
  908. return state->locked;
  909. }
  910. static void *lec_itf_walk(struct lec_state *state, loff_t *l)
  911. {
  912. struct net_device *dev;
  913. void *v;
  914. dev = state->dev ? state->dev : dev_lec[state->itf];
  915. v = (dev && netdev_priv(dev)) ?
  916. lec_priv_walk(state, l, netdev_priv(dev)) : NULL;
  917. if (!v && dev) {
  918. dev_put(dev);
  919. /* Partial state reset for the next time we get called */
  920. dev = NULL;
  921. }
  922. state->dev = dev;
  923. return v;
  924. }
  925. static void *lec_get_idx(struct lec_state *state, loff_t l)
  926. {
  927. void *v = NULL;
  928. for (; state->itf < MAX_LEC_ITF; state->itf++) {
  929. v = lec_itf_walk(state, &l);
  930. if (v)
  931. break;
  932. }
  933. return v;
  934. }
  935. static void *lec_seq_start(struct seq_file *seq, loff_t *pos)
  936. {
  937. struct lec_state *state = seq->private;
  938. state->itf = 0;
  939. state->dev = NULL;
  940. state->locked = NULL;
  941. state->arp_table = 0;
  942. state->misc_table = 0;
  943. state->node = SEQ_START_TOKEN;
  944. return *pos ? lec_get_idx(state, *pos) : SEQ_START_TOKEN;
  945. }
  946. static void lec_seq_stop(struct seq_file *seq, void *v)
  947. {
  948. struct lec_state *state = seq->private;
  949. if (state->dev) {
  950. spin_unlock_irqrestore(&state->locked->lec_arp_lock,
  951. state->flags);
  952. dev_put(state->dev);
  953. }
  954. }
  955. static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  956. {
  957. struct lec_state *state = seq->private;
  958. v = lec_get_idx(state, 1);
  959. *pos += !!PTR_ERR(v);
  960. return v;
  961. }
  962. static int lec_seq_show(struct seq_file *seq, void *v)
  963. {
  964. static const char lec_banner[] =
  965. "Itf MAC ATM destination"
  966. " Status Flags "
  967. "VPI/VCI Recv VPI/VCI\n";
  968. if (v == SEQ_START_TOKEN)
  969. seq_puts(seq, lec_banner);
  970. else {
  971. struct lec_state *state = seq->private;
  972. struct net_device *dev = state->dev;
  973. struct lec_arp_table *entry = hlist_entry(state->node,
  974. struct lec_arp_table,
  975. next);
  976. seq_printf(seq, "%s ", dev->name);
  977. lec_info(seq, entry);
  978. }
  979. return 0;
  980. }
  981. static const struct seq_operations lec_seq_ops = {
  982. .start = lec_seq_start,
  983. .next = lec_seq_next,
  984. .stop = lec_seq_stop,
  985. .show = lec_seq_show,
  986. };
  987. static int lec_seq_open(struct inode *inode, struct file *file)
  988. {
  989. return seq_open_private(file, &lec_seq_ops, sizeof(struct lec_state));
  990. }
  991. static const struct file_operations lec_seq_fops = {
  992. .owner = THIS_MODULE,
  993. .open = lec_seq_open,
  994. .read = seq_read,
  995. .llseek = seq_lseek,
  996. .release = seq_release_private,
  997. };
  998. #endif
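/*
 * ioctl entry points used by the LANE daemon: ATMLEC_CTRL attaches the
 * control VCC, ATMLEC_MCAST the multicast VCC and ATMLEC_DATA a data
 * VCC. All of them require CAP_NET_ADMIN.
 */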
  999. static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  1000. {
  1001. struct atm_vcc *vcc = ATM_SD(sock);
  1002. int err = 0;
  1003. switch (cmd) {
  1004. case ATMLEC_CTRL:
  1005. case ATMLEC_MCAST:
  1006. case ATMLEC_DATA:
  1007. if (!capable(CAP_NET_ADMIN))
  1008. return -EPERM;
  1009. break;
  1010. default:
  1011. return -ENOIOCTLCMD;
  1012. }
  1013. switch (cmd) {
  1014. case ATMLEC_CTRL:
  1015. err = lecd_attach(vcc, (int)arg);
  1016. if (err >= 0)
  1017. sock->state = SS_CONNECTED;
  1018. break;
  1019. case ATMLEC_MCAST:
  1020. err = lec_mcast_attach(vcc, (int)arg);
  1021. break;
  1022. case ATMLEC_DATA:
  1023. err = lec_vcc_attach(vcc, (void __user *)arg);
  1024. break;
  1025. }
  1026. return err;
  1027. }
  1028. static struct atm_ioctl lane_ioctl_ops = {
  1029. .owner = THIS_MODULE,
  1030. .ioctl = lane_ioctl,
  1031. };
  1032. static int __init lane_module_init(void)
  1033. {
  1034. #ifdef CONFIG_PROC_FS
  1035. struct proc_dir_entry *p;
  1036. p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
  1037. if (!p) {
  1038. pr_err("Unable to initialize /proc/net/atm/lec\n");
  1039. return -ENOMEM;
  1040. }
  1041. #endif
  1042. register_atm_ioctl(&lane_ioctl_ops);
  1043. pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
  1044. return 0;
  1045. }
  1046. static void __exit lane_module_cleanup(void)
  1047. {
  1048. int i;
  1049. struct lec_priv *priv;
  1050. remove_proc_entry("lec", atm_proc_root);
  1051. deregister_atm_ioctl(&lane_ioctl_ops);
  1052. for (i = 0; i < MAX_LEC_ITF; i++) {
  1053. if (dev_lec[i] != NULL) {
  1054. priv = netdev_priv(dev_lec[i]);
  1055. unregister_netdev(dev_lec[i]);
  1056. free_netdev(dev_lec[i]);
  1057. dev_lec[i] = NULL;
  1058. }
  1059. }
  1060. return;
  1061. }
  1062. module_init(lane_module_init);
  1063. module_exit(lane_module_cleanup);
  1064. /*
  1065. * LANE2: 3.1.3, LE_RESOLVE.request
1066. * A non-force call allocates memory and fills in *tlvs and *sizeoftlvs.
1067. * If sizeoftlvs == NULL the default TLVs associated with this
  1068. * lec will be used.
  1069. * If dst_mac == NULL, targetless LE_ARP will be sent
  1070. */
  1071. static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
  1072. u8 **tlvs, u32 *sizeoftlvs)
  1073. {
  1074. unsigned long flags;
  1075. struct lec_priv *priv = netdev_priv(dev);
  1076. struct lec_arp_table *table;
  1077. struct sk_buff *skb;
  1078. int retval;
  1079. if (force == 0) {
  1080. spin_lock_irqsave(&priv->lec_arp_lock, flags);
  1081. table = lec_arp_find(priv, dst_mac);
  1082. spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
  1083. if (table == NULL)
  1084. return -1;
  1085. *tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
  1086. if (*tlvs == NULL)
  1087. return -1;
  1088. *sizeoftlvs = table->sizeoftlvs;
  1089. return 0;
  1090. }
  1091. if (sizeoftlvs == NULL)
  1092. retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, NULL);
  1093. else {
  1094. skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC);
  1095. if (skb == NULL)
  1096. return -1;
  1097. skb->len = *sizeoftlvs;
  1098. skb_copy_to_linear_data(skb, *tlvs, *sizeoftlvs);
  1099. retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb);
  1100. }
  1101. return retval;
  1102. }
  1103. /*
  1104. * LANE2: 3.1.4, LE_ASSOCIATE.request
  1105. * Associate the *tlvs with the *lan_dst address.
  1106. * Will overwrite any previous association
  1107. * Returns 1 for success, 0 for failure (out of memory)
  1108. *
  1109. */
  1110. static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
  1111. const u8 *tlvs, u32 sizeoftlvs)
  1112. {
  1113. int retval;
  1114. struct sk_buff *skb;
  1115. struct lec_priv *priv = netdev_priv(dev);
  1116. if (compare_ether_addr(lan_dst, dev->dev_addr))
  1117. return 0; /* not our mac address */
  1118. kfree(priv->tlvs); /* NULL if there was no previous association */
  1119. priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
  1120. if (priv->tlvs == NULL)
  1121. return 0;
  1122. priv->sizeoftlvs = sizeoftlvs;
  1123. skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
  1124. if (skb == NULL)
  1125. return 0;
  1126. skb->len = sizeoftlvs;
  1127. skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
  1128. retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
  1129. if (retval != 0)
  1130. pr_info("lec.c: lane2_associate_req() failed\n");
  1131. /*
  1132. * If the previous association has changed we must
  1133. * somehow notify other LANE entities about the change
  1134. */
  1135. return 1;
  1136. }
  1137. /*
  1138. * LANE2: 3.1.5, LE_ASSOCIATE.indication
  1139. *
  1140. */
  1141. static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
  1142. const u8 *tlvs, u32 sizeoftlvs)
  1143. {
  1144. #if 0
  1145. int i = 0;
  1146. #endif
  1147. struct lec_priv *priv = netdev_priv(dev);
  1148. #if 0 /*
  1149. * Why have the TLVs in LE_ARP entries
  1150. * since we do not use them? When you
  1151. * uncomment this code, make sure the
  1152. * TLVs get freed when entry is killed
  1153. */
  1154. struct lec_arp_table *entry = lec_arp_find(priv, mac_addr);
  1155. if (entry == NULL)
  1156. return; /* should not happen */
  1157. kfree(entry->tlvs);
  1158. entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
  1159. if (entry->tlvs == NULL)
  1160. return;
  1161. entry->sizeoftlvs = sizeoftlvs;
  1162. #endif
  1163. #if 0
  1164. pr_info("\n");
  1165. pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
  1166. while (i < sizeoftlvs)
  1167. pr_cont("%02x ", tlvs[i++]);
  1168. pr_cont("\n");
  1169. #endif
  1170. /* tell MPOA about the TLVs we saw */
  1171. if (priv->lane2_ops && priv->lane2_ops->associate_indicator) {
  1172. priv->lane2_ops->associate_indicator(dev, mac_addr,
  1173. tlvs, sizeoftlvs);
  1174. }
  1175. return;
  1176. }
  1177. /*
1178. * Here starts what used to be lec_arpc.c
  1179. *
  1180. * lec_arpc.c was added here when making
  1181. * lane client modular. October 1997
  1182. */
  1183. #include <linux/types.h>
  1184. #include <linux/timer.h>
  1185. #include <linux/param.h>
  1186. #include <asm/atomic.h>
  1187. #include <linux/inetdevice.h>
  1188. #include <net/route.h>
  1189. #if 0
  1190. #define pr_debug(format, args...)
  1191. /*
  1192. #define pr_debug printk
  1193. */
  1194. #endif
  1195. #define DEBUG_ARP_TABLE 0
  1196. #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
  1197. static void lec_arp_check_expire(struct work_struct *work);
  1198. static void lec_arp_expire_arp(unsigned long data);
  1199. /*
  1200. * Arp table funcs
  1201. */
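/* Hash on the low bits of the last byte of the MAC address */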
  1202. #define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1))
  1203. /*
  1204. * Initialization of arp-cache
  1205. */
  1206. static void lec_arp_init(struct lec_priv *priv)
  1207. {
  1208. unsigned short i;
  1209. for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
  1210. INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
  1211. INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
  1212. INIT_HLIST_HEAD(&priv->lec_no_forward);
  1213. INIT_HLIST_HEAD(&priv->mcast_fwds);
  1214. spin_lock_init(&priv->lec_arp_lock);
  1215. INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
  1216. schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
  1217. }
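/*
 * Detach the data direct and receive VCCs still attached to an ARP
 * entry, restoring their original push/pop handlers and releasing them.
 */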
  1218. static void lec_arp_clear_vccs(struct lec_arp_table *entry)
  1219. {
  1220. if (entry->vcc) {
  1221. struct atm_vcc *vcc = entry->vcc;
  1222. struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
  1223. struct net_device *dev = (struct net_device *)vcc->proto_data;
  1224. vcc->pop = vpriv->old_pop;
  1225. if (vpriv->xoff)
  1226. netif_wake_queue(dev);
  1227. kfree(vpriv);
  1228. vcc->user_back = NULL;
  1229. vcc->push = entry->old_push;
  1230. vcc_release_async(vcc, -EPIPE);
  1231. entry->vcc = NULL;
  1232. }
  1233. if (entry->recv_vcc) {
  1234. entry->recv_vcc->push = entry->old_recv_push;
  1235. vcc_release_async(entry->recv_vcc, -EPIPE);
  1236. entry->recv_vcc = NULL;
  1237. }
  1238. }
  1239. /*
  1240. * Insert entry to lec_arp_table
  1241. * LANE2: Add to the end of the list to satisfy 8.1.13
  1242. */
  1243. static inline void
  1244. lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
  1245. {
  1246. struct hlist_head *tmp;
  1247. tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
  1248. hlist_add_head(&entry->next, tmp);
  1249. pr_debug("Added entry:%pM\n", entry->mac_addr);
  1250. }
  1251. /*
  1252. * Remove entry from lec_arp_table
  1253. */
  1254. static int
  1255. lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
  1256. {
  1257. struct hlist_node *node;
  1258. struct lec_arp_table *entry;
  1259. int i, remove_vcc = 1;
  1260. if (!to_remove)
  1261. return -1;
  1262. hlist_del(&to_remove->next);
  1263. del_timer(&to_remove->timer);
  1264. /*
  1265. * If this is the only MAC connected to this VCC,
  1266. * also tear down the VCC
  1267. */
  1268. if (to_remove->status >= ESI_FLUSH_PENDING) {
  1269. /*
  1270. * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
  1271. */
  1272. for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
  1273. hlist_for_each_entry(entry, node,
  1274. &priv->lec_arp_tables[i], next) {
  1275. if (memcmp(to_remove->atm_addr,
  1276. entry->atm_addr, ATM_ESA_LEN) == 0) {
  1277. remove_vcc = 0;
  1278. break;
  1279. }
  1280. }
  1281. }
  1282. if (remove_vcc)
  1283. lec_arp_clear_vccs(to_remove);
  1284. }
  1285. skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
  1286. pr_debug("Removed entry:%pM\n", to_remove->mac_addr);
  1287. return 0;
  1288. }
  1289. #if DEBUG_ARP_TABLE
  1290. static const char *get_status_string(unsigned char st)
  1291. {
  1292. switch (st) {
  1293. case ESI_UNKNOWN:
  1294. return "ESI_UNKNOWN";
  1295. case ESI_ARP_PENDING:
  1296. return "ESI_ARP_PENDING";
  1297. case ESI_VC_PENDING:
  1298. return "ESI_VC_PENDING";
  1299. case ESI_FLUSH_PENDING:
  1300. return "ESI_FLUSH_PENDING";
  1301. case ESI_FORWARD_DIRECT:
  1302. return "ESI_FORWARD_DIRECT";
  1303. }
  1304. return "<UNKNOWN>";
  1305. }
static void dump_arp_table(struct lec_priv *priv)
{
	struct hlist_node *node;
	struct lec_arp_table *rulla;
	char buf[256];
	int i, j, offset;

	pr_info("Dump %p:\n", priv);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry(rulla, node,
				     &priv->lec_arp_tables[i], next) {
			offset = 0;
			offset += sprintf(buf, "%d: %p\n", i, rulla);
			offset += sprintf(buf + offset, "Mac: %pM",
					  rulla->mac_addr);
			offset += sprintf(buf + offset, " Atm:");
			for (j = 0; j < ATM_ESA_LEN; j++) {
				offset += sprintf(buf + offset,
						  "%2.2x ",
						  rulla->atm_addr[j] & 0xff);
			}
			offset += sprintf(buf + offset,
					  "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
					  rulla->vcc ? rulla->vcc->vpi : 0,
					  rulla->vcc ? rulla->vcc->vci : 0,
					  rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
					  rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
					  rulla->last_used,
					  rulla->timestamp, rulla->no_tries);
			offset += sprintf(buf + offset,
					  "Flags:%x, Packets_flooded:%x, Status: %s ",
					  rulla->flags, rulla->packets_flooded,
					  get_status_string(rulla->status));
			pr_info("%s\n", buf);
		}
	}

	if (!hlist_empty(&priv->lec_no_forward))
		pr_info("No forward\n");
	hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
		offset = 0;
		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
		offset += sprintf(buf + offset, " Atm:");
		for (j = 0; j < ATM_ESA_LEN; j++) {
			offset += sprintf(buf + offset, "%2.2x ",
					  rulla->atm_addr[j] & 0xff);
		}
		offset += sprintf(buf + offset,
				  "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
				  rulla->vcc ? rulla->vcc->vpi : 0,
				  rulla->vcc ? rulla->vcc->vci : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
				  rulla->last_used,
				  rulla->timestamp, rulla->no_tries);
		offset += sprintf(buf + offset,
				  "Flags:%x, Packets_flooded:%x, Status: %s ",
				  rulla->flags, rulla->packets_flooded,
				  get_status_string(rulla->status));
		pr_info("%s\n", buf);
	}

	if (!hlist_empty(&priv->lec_arp_empty_ones))
		pr_info("Empty ones\n");
	hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
		offset = 0;
		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
		offset += sprintf(buf + offset, " Atm:");
		for (j = 0; j < ATM_ESA_LEN; j++) {
			offset += sprintf(buf + offset, "%2.2x ",
					  rulla->atm_addr[j] & 0xff);
		}
		offset += sprintf(buf + offset,
				  "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
				  rulla->vcc ? rulla->vcc->vpi : 0,
				  rulla->vcc ? rulla->vcc->vci : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
				  rulla->last_used,
				  rulla->timestamp, rulla->no_tries);
		offset += sprintf(buf + offset,
				  "Flags:%x, Packets_flooded:%x, Status: %s ",
				  rulla->flags, rulla->packets_flooded,
				  get_status_string(rulla->status));
		pr_info("%s", buf);
	}

	if (!hlist_empty(&priv->mcast_fwds))
		pr_info("Multicast Forward VCCs\n");
	hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
		offset = 0;
		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
		offset += sprintf(buf + offset, " Atm:");
		for (j = 0; j < ATM_ESA_LEN; j++) {
			offset += sprintf(buf + offset, "%2.2x ",
					  rulla->atm_addr[j] & 0xff);
		}
		offset += sprintf(buf + offset,
				  "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
				  rulla->vcc ? rulla->vcc->vpi : 0,
				  rulla->vcc ? rulla->vcc->vci : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
				  rulla->last_used,
				  rulla->timestamp, rulla->no_tries);
		offset += sprintf(buf + offset,
				  "Flags:%x, Packets_flooded:%x, Status: %s ",
				  rulla->flags, rulla->packets_flooded,
				  get_status_string(rulla->status));
		pr_info("%s\n", buf);
	}
}
#else
#define dump_arp_table(priv) do { } while (0)
#endif

/*
 * Destruction of arp-cache
 */
static void lec_arp_destroy(struct lec_priv *priv)
{
	unsigned long flags;
	struct hlist_node *node, *next;
	struct lec_arp_table *entry;
	int i;

	cancel_rearming_delayed_work(&priv->lec_arp_work);

	/*
	 * Remove all entries
	 */
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry_safe(entry, node, next,
					  &priv->lec_arp_tables[i], next) {
			lec_arp_remove(priv, entry);
			lec_arp_put(entry);
		}
		INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
	}

	hlist_for_each_entry_safe(entry, node, next,
				  &priv->lec_arp_empty_ones, next) {
		del_timer_sync(&entry->timer);
		lec_arp_clear_vccs(entry);
		hlist_del(&entry->next);
		lec_arp_put(entry);
	}
	INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);

	hlist_for_each_entry_safe(entry, node, next,
				  &priv->lec_no_forward, next) {
		del_timer_sync(&entry->timer);
		lec_arp_clear_vccs(entry);
		hlist_del(&entry->next);
		lec_arp_put(entry);
	}
	INIT_HLIST_HEAD(&priv->lec_no_forward);

	hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
		/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
		lec_arp_clear_vccs(entry);
		hlist_del(&entry->next);
		lec_arp_put(entry);
	}
	INIT_HLIST_HEAD(&priv->mcast_fwds);
	priv->mcast_vcc = NULL;
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}

/*
 * Find entry by mac_address
 */
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
					  const unsigned char *mac_addr)
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct lec_arp_table *entry;

	pr_debug("%pM\n", mac_addr);

	head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
	hlist_for_each_entry(entry, node, head, next) {
		if (!compare_ether_addr(mac_addr, entry->mac_addr))
			return entry;
	}
	return NULL;
}

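/*
 * Allocate and initialise a new cache entry for @mac_addr: zeroed,
 * usage count set to 1, tx_wait queue initialised and the per-entry
 * timer set up (but not armed) with lec_arp_expire_arp as its handler.
 * All callers in this file hold lec_arp_lock, hence GFP_ATOMIC.
 */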
static struct lec_arp_table *make_entry(struct lec_priv *priv,
					const unsigned char *mac_addr)
{
	struct lec_arp_table *to_return;

	to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
	if (!to_return) {
		pr_info("LEC: Arp entry kmalloc failed\n");
		return NULL;
	}
	memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
	INIT_HLIST_NODE(&to_return->next);
	setup_timer(&to_return->timer, lec_arp_expire_arp,
		    (unsigned long)to_return);
	to_return->last_used = jiffies;
	to_return->priv = priv;
	skb_queue_head_init(&to_return->tx_wait);
	atomic_set(&to_return->usage, 1);
	return to_return;
}

/* Arp sent timer expired */
static void lec_arp_expire_arp(unsigned long data)
{
	struct lec_arp_table *entry;

	entry = (struct lec_arp_table *)data;

	pr_debug("\n");
	if (entry->status == ESI_ARP_PENDING) {
		if (entry->no_tries <= entry->priv->max_retry_count) {
			if (entry->is_rdesc)
				send_to_lecd(entry->priv, l_rdesc_arp_xmt,
					     entry->mac_addr, NULL, NULL);
			else
				send_to_lecd(entry->priv, l_arp_xmt,
					     entry->mac_addr, NULL, NULL);
			entry->no_tries++;
		}
		mod_timer(&entry->timer, jiffies + (1 * HZ));
	}
}

/* Unknown/unused vcc expire, remove associated entry */
static void lec_arp_expire_vcc(unsigned long data)
{
	unsigned long flags;
	struct lec_arp_table *to_remove = (struct lec_arp_table *)data;
	struct lec_priv *priv = (struct lec_priv *)to_remove->priv;

	del_timer(&to_remove->timer);

	/* Check and dereference the same pointer in the debug print. */
	pr_debug("%p %p: vpi:%d vci:%d\n",
		 to_remove, priv,
		 to_remove->vcc ? to_remove->vcc->vpi : 0,
		 to_remove->vcc ? to_remove->vcc->vci : 0);

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	hlist_del(&to_remove->next);
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);

	lec_arp_clear_vccs(to_remove);
	lec_arp_put(to_remove);
}

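/*
 * Per-entry helper for lec_arp_check_expire() below.  Removes entries
 * that have aged past aging_time (or forward_delay_time for remote
 * entries during a topology change), unless they are permanent or have
 * a multicast MAC address (LANE2 7.1.20).  For entries still in
 * ESI_VC_PENDING or ESI_ARP_PENDING it resets the flooding counters,
 * re-issuing l_svc_setup for the former.  Returns true when an
 * ESI_FLUSH_PENDING entry has waited longer than path_switching_delay;
 * the entry is then held and the caller must drain tx_wait, mark it
 * ESI_FORWARD_DIRECT and drop the reference.
 */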
static bool __lec_arp_check_expire(struct lec_arp_table *entry,
				   unsigned long now,
				   struct lec_priv *priv)
{
	unsigned long time_to_check;

	if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change)
		time_to_check = priv->forward_delay_time;
	else
		time_to_check = priv->aging_time;

	pr_debug("About to expire: %lx - %lx > %lx\n",
		 now, entry->last_used, time_to_check);
	if (time_after(now, entry->last_used + time_to_check) &&
	    !(entry->flags & LEC_PERMANENT_FLAG) &&
	    !(entry->mac_addr[0] & 0x01)) {	/* LANE2: 7.1.20 */
		/* Remove entry */
		pr_debug("Entry timed out\n");
		lec_arp_remove(priv, entry);
		lec_arp_put(entry);
	} else {
		/* Something else */
		if ((entry->status == ESI_VC_PENDING ||
		     entry->status == ESI_ARP_PENDING) &&
		    time_after_eq(now, entry->timestamp +
				       priv->max_unknown_frame_time)) {
			entry->timestamp = jiffies;
			entry->packets_flooded = 0;
			if (entry->status == ESI_VC_PENDING)
				send_to_lecd(priv, l_svc_setup,
					     entry->mac_addr,
					     entry->atm_addr,
					     NULL);
		}
		if (entry->status == ESI_FLUSH_PENDING &&
		    time_after_eq(now, entry->timestamp +
				       priv->path_switching_delay)) {
			lec_arp_hold(entry);
			return true;
		}
	}

	return false;
}

/*
 * Expire entries.
 * 1. Re-set timer
 * 2. For each entry, delete entries that have aged past the age limit.
 * 3. For each entry, depending on the status of the entry, perform
 *    the following maintenance.
 *    a. If status is ESI_VC_PENDING or ESI_ARP_PENDING then if the
 *       tick_count is above the max_unknown_frame_time, clear
 *       the tick_count to zero and clear the packets_flooded counter
 *       to zero. This supports the packet rate limit per address
 *       while flooding unknowns.
 *    b. If the status is ESI_FLUSH_PENDING and the tick_count is greater
 *       than or equal to the path_switching_delay, change the status
 *       to ESI_FORWARD_DIRECT. This causes the flush period to end
 *       regardless of the progress of the flush protocol.
 */
static void lec_arp_check_expire(struct work_struct *work)
{
	unsigned long flags;
	struct lec_priv *priv =
		container_of(work, struct lec_priv, lec_arp_work.work);
	struct hlist_node *node, *next;
	struct lec_arp_table *entry;
	unsigned long now;
	int i;

	pr_debug("%p\n", priv);
	now = jiffies;
restart:
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry_safe(entry, node, next,
					  &priv->lec_arp_tables[i], next) {
			if (__lec_arp_check_expire(entry, now, priv)) {
				struct sk_buff *skb;
				struct atm_vcc *vcc = entry->vcc;

				spin_unlock_irqrestore(&priv->lec_arp_lock,
						       flags);
				while ((skb = skb_dequeue(&entry->tx_wait)))
					lec_send(vcc, skb);
				entry->last_used = jiffies;
				entry->status = ESI_FORWARD_DIRECT;
				lec_arp_put(entry);

				goto restart;
			}
		}
	}
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);

	schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}

/*
 * Try to find vcc where mac_address is attached.
 *
 */
static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
				       const unsigned char *mac_to_find,
				       int is_rdesc,
				       struct lec_arp_table **ret_entry)
{
	unsigned long flags;
	struct lec_arp_table *entry;
	struct atm_vcc *found;

	if (mac_to_find[0] & 0x01) {
		switch (priv->lane_version) {
		case 1:
			return priv->mcast_vcc;
		case 2:	/* LANE2 wants arp for multicast addresses */
			if (!compare_ether_addr(mac_to_find, bus_mac))
				return priv->mcast_vcc;
			break;
		default:
			break;
		}
	}

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	entry = lec_arp_find(priv, mac_to_find);

	if (entry) {
		if (entry->status == ESI_FORWARD_DIRECT) {
			/* Connection Ok */
			entry->last_used = jiffies;
			lec_arp_hold(entry);
			*ret_entry = entry;
			found = entry->vcc;
			goto out;
		}
		/*
		 * If the LE_ARP cache entry is still pending, reset count to 0
		 * so another LE_ARP request can be made for this frame.
		 */
		if (entry->status == ESI_ARP_PENDING)
			entry->no_tries = 0;
		/*
		 * Data direct VC not yet set up, check to see if the unknown
		 * frame count is greater than the limit. If the limit has
		 * not been reached, allow the caller to send packet to
		 * BUS.
		 */
		if (entry->status != ESI_FLUSH_PENDING &&
		    entry->packets_flooded <
		    priv->maximum_unknown_frame_count) {
			entry->packets_flooded++;
			pr_debug("Flooding..\n");
			found = priv->mcast_vcc;
			goto out;
		}
		/*
		 * We got here because entry->status == ESI_FLUSH_PENDING
		 * or BUS flood limit was reached for an entry which is
		 * in ESI_ARP_PENDING or ESI_VC_PENDING state.
		 */
		lec_arp_hold(entry);
		*ret_entry = entry;
		pr_debug("entry->status %d entry->vcc %p\n", entry->status,
			 entry->vcc);
		found = NULL;
	} else {
		/* No matching entry was found */
		entry = make_entry(priv, mac_to_find);
		pr_debug("Making entry\n");
		if (!entry) {
			found = priv->mcast_vcc;
			goto out;
		}
		lec_arp_add(priv, entry);
		/* We want arp-request(s) to be sent */
		entry->packets_flooded = 1;
		entry->status = ESI_ARP_PENDING;
		entry->no_tries = 1;
		entry->last_used = entry->timestamp = jiffies;
		entry->is_rdesc = is_rdesc;
		if (entry->is_rdesc)
			send_to_lecd(priv, l_rdesc_arp_xmt, mac_to_find, NULL,
				     NULL);
		else
			send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL);
		entry->timer.expires = jiffies + (1 * HZ);
		entry->timer.function = lec_arp_expire_arp;
		add_timer(&entry->timer);
		found = priv->mcast_vcc;
	}

out:
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
	return found;
}

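/*
 * Remove the first cache entry whose ATM address matches @atm_addr.
 * Entries flagged LEC_PERMANENT_FLAG are only removed when @permanent
 * is set.  Returns 0 if a matching entry was removed, -1 otherwise.
 */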
static int
lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
		unsigned long permanent)
{
	unsigned long flags;
	struct hlist_node *node, *next;
	struct lec_arp_table *entry;
	int i;

	pr_debug("\n");
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry_safe(entry, node, next,
					  &priv->lec_arp_tables[i], next) {
			if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
			    (permanent ||
			     !(entry->flags & LEC_PERMANENT_FLAG))) {
				lec_arp_remove(priv, entry);
				lec_arp_put(entry);
				spin_unlock_irqrestore(&priv->lec_arp_lock,
						       flags);
				return 0;
			}
		}
	}
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
	return -1;
}

/*
 * Notifies: Response to arp_request (atm_addr != NULL)
 */
static void
lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
	       const unsigned char *atm_addr, unsigned long remoteflag,
	       unsigned int targetless_le_arp)
{
	unsigned long flags;
	struct hlist_node *node, *next;
	struct lec_arp_table *entry, *tmp;
	int i;

	pr_debug("%smac:%pM\n",
		 (targetless_le_arp) ? "targetless " : "", mac_addr);

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	entry = lec_arp_find(priv, mac_addr);
	if (entry == NULL && targetless_le_arp)
		goto out;	/*
				 * LANE2: ignore targetless LE_ARPs for which
				 * we have no entry in the cache. 7.1.30
				 */
	if (!hlist_empty(&priv->lec_arp_empty_ones)) {
		hlist_for_each_entry_safe(entry, node, next,
					  &priv->lec_arp_empty_ones, next) {
			if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
				hlist_del(&entry->next);
				del_timer(&entry->timer);
				tmp = lec_arp_find(priv, mac_addr);
				if (tmp) {
					del_timer(&tmp->timer);
					tmp->status = ESI_FORWARD_DIRECT;
					memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN);
					tmp->vcc = entry->vcc;
					tmp->old_push = entry->old_push;
					tmp->last_used = jiffies;
					del_timer(&entry->timer);
					lec_arp_put(entry);
					entry = tmp;
				} else {
					entry->status = ESI_FORWARD_DIRECT;
					memcpy(entry->mac_addr, mac_addr, ETH_ALEN);
					entry->last_used = jiffies;
					lec_arp_add(priv, entry);
				}
				if (remoteflag)
					entry->flags |= LEC_REMOTE_FLAG;
				else
					entry->flags &= ~LEC_REMOTE_FLAG;
				pr_debug("After update\n");
				dump_arp_table(priv);
				goto out;
			}
		}
	}

	entry = lec_arp_find(priv, mac_addr);
	if (!entry) {
		entry = make_entry(priv, mac_addr);
		if (!entry)
			goto out;
		entry->status = ESI_UNKNOWN;
		lec_arp_add(priv, entry);
		/* Temporary, changes before end of function */
	}
	memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
	del_timer(&entry->timer);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry(tmp, node,
				     &priv->lec_arp_tables[i], next) {
			if (entry != tmp &&
			    !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
				/* Vcc to this host exists */
				if (tmp->status > ESI_VC_PENDING) {
					/*
					 * ESI_FLUSH_PENDING,
					 * ESI_FORWARD_DIRECT
					 */
					entry->vcc = tmp->vcc;
					entry->old_push = tmp->old_push;
				}
				entry->status = tmp->status;
				break;
			}
		}
	}

	if (remoteflag)
		entry->flags |= LEC_REMOTE_FLAG;
	else
		entry->flags &= ~LEC_REMOTE_FLAG;
	if (entry->status == ESI_ARP_PENDING || entry->status == ESI_UNKNOWN) {
		entry->status = ESI_VC_PENDING;
		send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL);
	}
	pr_debug("After update2\n");
	dump_arp_table(priv);
out:
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}

/*
 * Notifies: Vcc setup ready
 */
static void
lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
	      struct atm_vcc *vcc,
	      void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
{
	unsigned long flags;
	struct hlist_node *node;
	struct lec_arp_table *entry;
	int i, found_entry = 0;

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	/* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
	if (ioc_data->receive == 2) {
		pr_debug("LEC_ARP: Attaching mcast forward\n");
#if 0
		entry = lec_arp_find(priv, bus_mac);
		if (!entry) {
			pr_info("LEC_ARP: Multicast entry not found!\n");
			goto out;
		}
		memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
		entry->recv_vcc = vcc;
		entry->old_recv_push = old_push;
#endif
		entry = make_entry(priv, bus_mac);
		if (entry == NULL)
			goto out;
		del_timer(&entry->timer);
		memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
		entry->recv_vcc = vcc;
		entry->old_recv_push = old_push;
		hlist_add_head(&entry->next, &priv->mcast_fwds);
		goto out;
	} else if (ioc_data->receive == 1) {
		/*
		 * Vcc which we don't want to make default vcc,
		 * attach it anyway.
		 */
		pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
			 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
			 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
			 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
			 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
			 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
			 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
			 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
			 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
			 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
			 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
		entry = make_entry(priv, bus_mac);
		if (entry == NULL)
			goto out;
		memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
		memset(entry->mac_addr, 0, ETH_ALEN);
		entry->recv_vcc = vcc;
		entry->old_recv_push = old_push;
		entry->status = ESI_UNKNOWN;
		entry->timer.expires = jiffies + priv->vcc_timeout_period;
		entry->timer.function = lec_arp_expire_vcc;
		hlist_add_head(&entry->next, &priv->lec_no_forward);
		add_timer(&entry->timer);
		dump_arp_table(priv);
		goto out;
	}
	pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
		 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
		 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
		 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
		 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
		 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
		 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
		 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
		 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
		 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
		 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry(entry, node,
				     &priv->lec_arp_tables[i], next) {
			if (memcmp(ioc_data->atm_addr, entry->atm_addr,
				   ATM_ESA_LEN) == 0) {
				pr_debug("LEC_ARP: Attaching data direct\n");
				pr_debug("Currently -> Vcc: %d, Rvcc:%d\n",
					 entry->vcc ? entry->vcc->vci : 0,
					 entry->recv_vcc ? entry->recv_vcc->vci : 0);
				found_entry = 1;
				del_timer(&entry->timer);
				entry->vcc = vcc;
				entry->old_push = old_push;
				if (entry->status == ESI_VC_PENDING) {
					if (priv->maximum_unknown_frame_count == 0)
						entry->status =
							ESI_FORWARD_DIRECT;
					else {
						entry->timestamp = jiffies;
						entry->status =
							ESI_FLUSH_PENDING;
#if 0
						send_to_lecd(priv, l_flush_xmt,
							     NULL,
							     entry->atm_addr,
							     NULL);
#endif
					}
				} else {
					/*
					 * They were forming a connection
					 * to us, and we to them. Our
					 * ATM address is numerically lower
					 * than theirs, so we make connection
					 * we formed into default VCC (8.1.11).
					 * Connection they made gets torn
					 * down. This might confuse some
					 * clients. Can be changed if
					 * someone reports trouble...
					 */
					;
				}
			}
		}
	}
	if (found_entry) {
		pr_debug("After vcc was added\n");
		dump_arp_table(priv);
		goto out;
	}
	/*
	 * Not found, snatch address from first data packet that arrives
	 * from this vcc
	 */
	entry = make_entry(priv, bus_mac);
	if (!entry)
		goto out;
	entry->vcc = vcc;
	entry->old_push = old_push;
	memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
	memset(entry->mac_addr, 0, ETH_ALEN);
	entry->status = ESI_UNKNOWN;
	hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
	entry->timer.expires = jiffies + priv->vcc_timeout_period;
	entry->timer.function = lec_arp_expire_vcc;
	add_timer(&entry->timer);
	pr_debug("After vcc was added\n");
	dump_arp_table(priv);
out:
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}

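/*
 * A FLUSH response with transaction id @tran_id has been received: for
 * every entry still in ESI_FLUSH_PENDING with that id, transmit the
 * frames queued on tx_wait over its data direct VCC and move the entry
 * to ESI_FORWARD_DIRECT.  The lock is dropped while the queue is
 * drained, so the scan restarts from the beginning afterwards.
 */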
static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
{
	unsigned long flags;
	struct hlist_node *node;
	struct lec_arp_table *entry;
	int i;

	pr_debug("%lx\n", tran_id);
restart:
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry(entry, node,
				     &priv->lec_arp_tables[i], next) {
			if (entry->flush_tran_id == tran_id &&
			    entry->status == ESI_FLUSH_PENDING) {
				struct sk_buff *skb;
				struct atm_vcc *vcc = entry->vcc;

				lec_arp_hold(entry);
				spin_unlock_irqrestore(&priv->lec_arp_lock,
						       flags);
				while ((skb = skb_dequeue(&entry->tx_wait)))
					lec_send(vcc, skb);
				entry->last_used = jiffies;
				entry->status = ESI_FORWARD_DIRECT;
				lec_arp_put(entry);
				pr_debug("LEC_ARP: Flushed\n");
				goto restart;
			}
		}
	}
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
	dump_arp_table(priv);
}

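/*
 * Record @tran_id as the pending flush transaction id on every cache
 * entry whose ATM address matches @atm_addr, so that a later
 * lec_flush_complete() can match the FLUSH response to those entries.
 */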
static void
lec_set_flush_tran_id(struct lec_priv *priv,
		      const unsigned char *atm_addr, unsigned long tran_id)
{
	unsigned long flags;
	struct hlist_node *node;
	struct lec_arp_table *entry;
	int i;

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
		hlist_for_each_entry(entry, node,
				     &priv->lec_arp_tables[i], next) {
			if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
				entry->flush_tran_id = tran_id;
				pr_debug("Set flush transaction id to %lx for %p\n",
					 tran_id, entry);
			}
		}
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}

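/*
 * Attach @vcc as the multicast send VCC (towards the BUS): wrap its
 * pop/push handlers with lec_pop/lec_push, add a permanent
 * ESI_FORWARD_DIRECT entry for the broadcast MAC address and record
 * the VCC in priv->mcast_vcc.  Returns 0 on success, -ENOMEM on
 * allocation failure.
 */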
static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
{
	unsigned long flags;
	unsigned char mac_addr[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	struct lec_arp_table *to_add;
	struct lec_vcc_priv *vpriv;
	int err = 0;

	vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
	if (!vpriv)
		return -ENOMEM;
	vpriv->xoff = 0;
	vpriv->old_pop = vcc->pop;
	vcc->user_back = vpriv;
	vcc->pop = lec_pop;
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	to_add = make_entry(priv, mac_addr);
	if (!to_add) {
		vcc->pop = vpriv->old_pop;
		kfree(vpriv);
		err = -ENOMEM;
		goto out;
	}
	memcpy(to_add->atm_addr, vcc->remote.sas_addr.prv, ATM_ESA_LEN);
	to_add->status = ESI_FORWARD_DIRECT;
	to_add->flags |= LEC_PERMANENT_FLAG;
	to_add->vcc = vcc;
	to_add->old_push = vcc->push;
	vcc->push = lec_push;
	priv->mcast_vcc = vcc;
	lec_arp_add(priv, to_add);
out:
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
	return err;
}

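/*
 * @vcc is being closed: drop every reference to it from the main hash
 * table and from the lec_arp_empty_ones, lec_no_forward and mcast_fwds
 * lists, and clear priv->mcast_vcc if this was the multicast VCC.
 */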
static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
{
	unsigned long flags;
	struct hlist_node *node, *next;
	struct lec_arp_table *entry;
	int i;

	pr_debug("LEC_ARP: lec_vcc_close vpi:%d vci:%d\n", vcc->vpi, vcc->vci);
	dump_arp_table(priv);

	spin_lock_irqsave(&priv->lec_arp_lock, flags);

	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry_safe(entry, node, next,
					  &priv->lec_arp_tables[i], next) {
			if (vcc == entry->vcc) {
				lec_arp_remove(priv, entry);
				lec_arp_put(entry);
				if (priv->mcast_vcc == vcc)
					priv->mcast_vcc = NULL;
			}
		}
	}

	hlist_for_each_entry_safe(entry, node, next,
				  &priv->lec_arp_empty_ones, next) {
		if (entry->vcc == vcc) {
			lec_arp_clear_vccs(entry);
			del_timer(&entry->timer);
			hlist_del(&entry->next);
			lec_arp_put(entry);
		}
	}

	hlist_for_each_entry_safe(entry, node, next,
				  &priv->lec_no_forward, next) {
		if (entry->recv_vcc == vcc) {
			lec_arp_clear_vccs(entry);
			del_timer(&entry->timer);
			hlist_del(&entry->next);
			lec_arp_put(entry);
		}
	}

	hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
		if (entry->recv_vcc == vcc) {
			lec_arp_clear_vccs(entry);
			/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
			hlist_del(&entry->next);
			lec_arp_put(entry);
		}
	}

	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
	dump_arp_table(priv);
}

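/*
 * A frame has arrived on @vcc while its cache entry still sits on
 * lec_arp_empty_ones (created by lec_vcc_added() with an unknown MAC):
 * learn the source MAC address from the frame, drop any older entry
 * for that MAC and move this entry into the main hash table as
 * ESI_FORWARD_DIRECT.
 */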
static void
lec_arp_check_empties(struct lec_priv *priv,
		      struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct hlist_node *node, *next;
	struct lec_arp_table *entry, *tmp;
	struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
	unsigned char *src;
#ifdef CONFIG_TR
	struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data;

	if (priv->is_trdev)
		src = tr_hdr->h_source;
	else
#endif
		src = hdr->h_source;

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	hlist_for_each_entry_safe(entry, node, next,
				  &priv->lec_arp_empty_ones, next) {
		if (vcc == entry->vcc) {
			del_timer(&entry->timer);
			memcpy(entry->mac_addr, src, ETH_ALEN);
			entry->status = ESI_FORWARD_DIRECT;
			entry->last_used = jiffies;
			/* We might have got an entry */
			tmp = lec_arp_find(priv, src);
			if (tmp) {
				lec_arp_remove(priv, tmp);
				lec_arp_put(tmp);
			}
			hlist_del(&entry->next);
			lec_arp_add(priv, entry);
			goto out;
		}
	}
	pr_debug("LEC_ARP: Arp_check_empties: entry not found!\n");
out:
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}

MODULE_LICENSE("GPL");