/*
 * NET3: Token ring device handling subroutines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:  3 Feb 97  Paul Norton <pnorton@cts.com>  Minor routing fixes.
 *         Added rif table to /proc/net/tr_rif and rif timeout to
 *         /proc/sys/net/token-ring/rif_timeout.
 *         22 Jun 98  Paul Norton <p.norton@computer.org>  Rearranged
 *         tr_header and tr_type_trans to handle passing IPX SNAP and
 *         802.2 through the correct layers.  Eliminated tr_reformat.
 *
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/arp.h>

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);

#define TR_SR_DEBUG 0
/*
 * Each RIF entry we learn is kept this way
 */
struct rif_cache {
        unsigned char addr[TR_ALEN];
        int iface;
        __be16 rcf;
        __be16 rseg[8];
        struct rif_cache *next;
        unsigned long last_used;
        unsigned char local_ring;
};

#define RIF_TABLE_SIZE 32

/*
 * We hash the RIF cache 32 ways. We do after all have to look it
 * up a lot.
 */
static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);

/*
 * Garbage disposal timer.
 */
static struct timer_list rif_timer;

int sysctl_tr_rif_timeout = 60*10*HZ;
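
/*
 * Fold all six bytes of the node address into an index in
 * [0, RIF_TABLE_SIZE).  Collisions simply extend the per-bucket chain
 * that tr_source_route() and tr_add_rif_info() walk.
 */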
static inline unsigned long rif_hash(const unsigned char *addr)
{
        unsigned long x;

        x = addr[0];
        x = (x << 2) ^ addr[1];
        x = (x << 2) ^ addr[2];
        x = (x << 2) ^ addr[3];
        x = (x << 2) ^ addr[4];
        x = (x << 2) ^ addr[5];

        x ^= x >> 8;

        return x & (RIF_TABLE_SIZE - 1);
}
/*
 * Put the headers on a token ring packet. Token ring source routing
 * makes this a little more exciting than on ethernet.
 */
static int tr_header(struct sk_buff *skb, struct net_device *dev,
                     unsigned short type,
                     void *daddr, void *saddr, unsigned len)
{
        struct trh_hdr *trh;
        int hdr_len;

        /*
         * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
         * dev->hard_header directly.
         */
        if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
        {
                struct trllc *trllc;

                hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
                trh = (struct trh_hdr *)skb_push(skb, hdr_len);
                trllc = (struct trllc *)(trh+1);
                trllc->dsap = trllc->ssap = EXTENDED_SAP;
                trllc->llc = UI_CMD;
                trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
                trllc->ethertype = htons(type);
        }
        else
        {
                hdr_len = sizeof(struct trh_hdr);
                trh = (struct trh_hdr *)skb_push(skb, hdr_len);
        }

        trh->ac = AC;
        trh->fc = LLC_FRAME;

        if (saddr)
                memcpy(trh->saddr, saddr, dev->addr_len);
        else
                memcpy(trh->saddr, dev->dev_addr, dev->addr_len);

        /*
         * Build the destination and then source route the frame
         */
        if (daddr)
        {
                memcpy(trh->daddr, daddr, dev->addr_len);
                tr_source_route(skb, trh, dev);
                return hdr_len;
        }
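
        /*
         * No destination yet: return a negative header length so the
         * caller knows the header is incomplete and must be finished
         * later (by tr_rebuild_header() once address resolution is done).
         */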
        return -hdr_len;
}
/*
 * A neighbour discovery of some species (eg arp) has completed. We
 * can now send the packet.
 */
static int tr_rebuild_header(struct sk_buff *skb)
{
        struct trh_hdr *trh = (struct trh_hdr *)skb->data;
        struct trllc *trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr));
        struct net_device *dev = skb->dev;

        /*
         * FIXME: We don't yet support IPv6 over token rings
         */
        if (trllc->ethertype != htons(ETH_P_IP)) {
                printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", (unsigned int)htons(trllc->ethertype));
                return 0;
        }

#ifdef CONFIG_INET
        if (arp_find(trh->daddr, skb)) {
                return 1;
        }
        else
#endif
        {
                tr_source_route(skb, trh, dev);
                return 0;
        }
}
/*
 * Some of this is a bit hackish. We intercept RIF information
 * used for source routing. We also grab IP directly and don't feed
 * it via SNAP.
 */
unsigned short tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        struct trh_hdr *trh = (struct trh_hdr *)skb->data;
        struct trllc *trllc;
        unsigned riflen = 0;

        skb->mac.raw = skb->data;
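
        /*
         * When the source address has the routing information indicator
         * (RII) bit set, the frame carries a routing information field
         * whose length in bytes is encoded in the routing control field.
         * struct trh_hdr reserves space for a maximal RIF (TR_MAXRIFLEN
         * bytes), so the real LLC header starts TR_MAXRIFLEN - riflen
         * bytes earlier than sizeof(struct trh_hdr) would suggest.
         */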
        if (trh->saddr[0] & TR_RII)
                riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

        trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen);

        skb_pull(skb, sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen);

        if (*trh->daddr & 0x80)
        {
                if (!memcmp(trh->daddr, dev->broadcast, TR_ALEN))
                        skb->pkt_type = PACKET_BROADCAST;
                else
                        skb->pkt_type = PACKET_MULTICAST;
        }
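        /*
         * Presumably intended to match the canonical 01:00:5E IP
         * multicast prefix; note that as written the middle term is a
         * bitwise AND with zero, so this test can never be true.
         */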
        else if ((trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
        {
                skb->pkt_type = PACKET_MULTICAST;
        }
        else if (dev->flags & IFF_PROMISC)
        {
                if (memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
                        skb->pkt_type = PACKET_OTHERHOST;
        }

        if ((skb->pkt_type != PACKET_BROADCAST) &&
            (skb->pkt_type != PACKET_MULTICAST))
                tr_add_rif_info(trh, dev);

        /*
         * Strip the SNAP header from ARP packets since we don't
         * pass them through to the 802.2/SNAP layers.
         */
        if (trllc->dsap == EXTENDED_SAP &&
            (trllc->ethertype == ntohs(ETH_P_IP) ||
             trllc->ethertype == ntohs(ETH_P_IPV6) ||
             trllc->ethertype == ntohs(ETH_P_ARP)))
        {
                skb_pull(skb, sizeof(struct trllc));
                return trllc->ethertype;
        }

        return ntohs(ETH_P_TR_802_2);
}
/*
 * We try to do source routing...
 */
void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh, struct net_device *dev)
{
        int slack;
        unsigned int hash;
        struct rif_cache *entry;
        unsigned char *olddata;
        unsigned long flags;
        static const unsigned char mcast_func_addr[]
                = {0xC0, 0x00, 0x00, 0x04, 0x00, 0x00};

        spin_lock_irqsave(&rif_lock, flags);

        /*
         * Broadcasts are single route as stated in RFC 1042
         */
        if ((!memcmp(&(trh->daddr[0]), &(dev->broadcast[0]), TR_ALEN)) ||
            (!memcmp(&(trh->daddr[0]), &(mcast_func_addr[0]), TR_ALEN)))
        {
                trh->rcf = htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
                                 | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
                trh->saddr[0] |= TR_RII;
        }
        else
        {
                hash = rif_hash(trh->daddr);
                /*
                 * Walk the hash table and look for an entry
                 */
                for (entry = rif_table[hash]; entry && memcmp(&(entry->addr[0]), &(trh->daddr[0]), TR_ALEN); entry = entry->next);

                /*
                 * If we found an entry we can route the frame.
                 */
                if (entry)
                {
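                        /*
                         * The #if/#endif placement below is deliberate:
                         * with TR_SR_DEBUG disabled the preprocessor also
                         * drops the "} else {" between the two printk()s,
                         * so only the if-branch body survives and the
                         * braces still balance either way.
                         */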
#if TR_SR_DEBUG
                        printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n", trh->daddr[0],
                               trh->daddr[1], trh->daddr[2], trh->daddr[3], trh->daddr[4], trh->daddr[5]);
#endif
                        if (!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
                        {
                                trh->rcf = entry->rcf;
                                memcpy(&trh->rseg[0], &entry->rseg[0], 8 * sizeof(unsigned short));
                                trh->rcf ^= htons(TR_RCF_DIR_BIT);
                                trh->rcf &= htons(0x1fff);      /* Issam Chehab <ichehab@madge1.demon.co.uk> */

                                trh->saddr[0] |= TR_RII;
#if TR_SR_DEBUG
                                printk("entry found with rcf %04x\n", entry->rcf);
                        }
                        else
                        {
                                printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
#endif
                        }
                        entry->last_used = jiffies;
                }
                else
                {
                        /*
                         * Without the information we simply have to shout
                         * on the wire. The replies should rapidly clean this
                         * situation up.
                         */
                        trh->rcf = htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
                                         | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
                        trh->saddr[0] |= TR_RII;
#if TR_SR_DEBUG
                        printk("no entry in rif table found - broadcasting frame\n");
#endif
                }
        }

        /* Compress the RIF here so we don't have to do it in the driver(s) */
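        /*
         * The header was built with room for a full 18 byte RIF.  "slack"
         * is the unused part of that reservation: 18 bytes when no RIF is
         * present, otherwise 18 minus the actual RIF length.  Pulling the
         * skb by slack and sliding the fixed part of the header up leaves
         * a contiguous, correctly sized MAC header for the driver.
         */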
        if (!(trh->saddr[0] & 0x80))
                slack = 18;
        else
                slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8);

        olddata = skb->data;
        spin_unlock_irqrestore(&rif_lock, flags);

        skb_pull(skb, slack);
        memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}
/*
 * We have learned some new RIF information for our source
 * routing.
 */
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
        unsigned int hash, rii_p = 0;
        unsigned long flags;
        struct rif_cache *entry;
        unsigned char saddr0;

        spin_lock_irqsave(&rif_lock, flags);
        saddr0 = trh->saddr[0];

        /*
         * Firstly see if the entry exists
         */
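        /*
         * Clear the routing information indicator bit so the stored and
         * compared addresses are the plain station address.  The learned
         * route is only useful when the RIF is longer than the bare
         * two-byte routing control field, hence the "> 2" test.
         */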
        if (trh->saddr[0] & TR_RII)
        {
                trh->saddr[0] &= 0x7f;
                if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
                {
                        rii_p = 1;
                }
        }

        hash = rif_hash(trh->saddr);
        for (entry = rif_table[hash]; entry && memcmp(&(entry->addr[0]), &(trh->saddr[0]), TR_ALEN); entry = entry->next);

        if (entry == NULL)
        {
#if TR_SR_DEBUG
                printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
                       trh->saddr[0], trh->saddr[1], trh->saddr[2],
                       trh->saddr[3], trh->saddr[4], trh->saddr[5],
                       ntohs(trh->rcf));
#endif
                /*
                 * Allocate our new entry. A failure to allocate loses
                 * us the information. This is harmless.
                 *
                 * FIXME: We ought to keep some kind of cache size
                 * limiting and adjust the timers to suit.
                 */
                entry = kmalloc(sizeof(struct rif_cache), GFP_ATOMIC);

                if (!entry)
                {
                        printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
                        spin_unlock_irqrestore(&rif_lock, flags);
                        return;
                }

                memcpy(&(entry->addr[0]), &(trh->saddr[0]), TR_ALEN);
                entry->iface = dev->ifindex;
                entry->next = rif_table[hash];
                entry->last_used = jiffies;
                rif_table[hash] = entry;

                if (rii_p)
                {
                        entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
                        memcpy(&(entry->rseg[0]), &(trh->rseg[0]), 8 * sizeof(unsigned short));
                        entry->local_ring = 0;
                }
                else
                {
                        entry->local_ring = 1;
                }
        }
        else    /* Y. Tahara added */
        {
                /*
                 * Update existing entries
                 */
                if (!entry->local_ring)
                        if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
                            !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
                        {
#if TR_SR_DEBUG
                                printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
                                       trh->saddr[0], trh->saddr[1], trh->saddr[2],
                                       trh->saddr[3], trh->saddr[4], trh->saddr[5],
                                       ntohs(trh->rcf));
#endif
                                entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
                                memcpy(&(entry->rseg[0]), &(trh->rseg[0]), 8 * sizeof(unsigned short));
                        }
                entry->last_used = jiffies;
        }

        trh->saddr[0] = saddr0;         /* put the routing indicator back for tcpdump */
        spin_unlock_irqrestore(&rif_lock, flags);
}
/*
 * Scan the cache with a timer and see what we need to throw out.
 */
static void rif_check_expire(unsigned long dummy)
{
        int i;
        unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
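
        /*
         * next_interval starts half a timeout away and is pulled earlier
         * whenever a surviving entry is due to expire sooner, so the
         * timer is always re-armed in time for the next stale entry.
         */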
        spin_lock_irqsave(&rif_lock, flags);

        for (i = 0; i < RIF_TABLE_SIZE; i++) {
                struct rif_cache *entry, **pentry;

                pentry = rif_table + i;
                while ((entry = *pentry) != NULL) {
                        unsigned long expires
                                = entry->last_used + sysctl_tr_rif_timeout;

                        if (time_before_eq(expires, jiffies)) {
                                *pentry = entry->next;
                                kfree(entry);
                        } else {
                                pentry = &entry->next;

                                if (time_before(expires, next_interval))
                                        next_interval = expires;
                        }
                }
        }

        spin_unlock_irqrestore(&rif_lock, flags);

        mod_timer(&rif_timer, next_interval);
}
/*
 * Generate the /proc/net information for the token ring RIF
 * routing.
 */
#ifdef CONFIG_PROC_FS
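
/*
 * /proc/net/tr_rif is produced through the seq_file interface.  Entries
 * are numbered in the order the hash chains are walked, and *pos - 1
 * selects that entry (position 0 is the header line), so a dump can be
 * resumed from any offset.
 */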
static struct rif_cache *rif_get_idx(loff_t pos)
{
        int i;
        struct rif_cache *entry;
        loff_t off = 0;

        for (i = 0; i < RIF_TABLE_SIZE; i++)
                for (entry = rif_table[i]; entry; entry = entry->next) {
                        if (off == pos)
                                return entry;
                        ++off;
                }

        return NULL;
}
static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
{
        spin_lock_irq(&rif_lock);

        return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int i;
        struct rif_cache *ent = v;

        ++*pos;

        if (v == SEQ_START_TOKEN) {
                i = -1;
                goto scan;
        }

        if (ent->next)
                return ent->next;

        i = rif_hash(ent->addr);
 scan:
        while (++i < RIF_TABLE_SIZE) {
                if ((ent = rif_table[i]) != NULL)
                        return ent;
        }
        return NULL;
}

static void rif_seq_stop(struct seq_file *seq, void *v)
{
        spin_unlock_irq(&rif_lock);
}
static int rif_seq_show(struct seq_file *seq, void *v)
{
        int j, rcf_len, segment, brdgnmb;
        struct rif_cache *entry = v;

        if (v == SEQ_START_TOKEN)
                seq_puts(seq,
                         "if TR address TTL rcf routing segments\n");
        else {
                struct net_device *dev = dev_get_by_index(entry->iface);
                long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
                                - (long) jiffies;

                seq_printf(seq, "%s %02X:%02X:%02X:%02X:%02X:%02X %7li ",
                           dev ? dev->name : "?",
                           entry->addr[0], entry->addr[1], entry->addr[2],
                           entry->addr[3], entry->addr[4], entry->addr[5],
                           ttl / HZ);

                if (entry->local_ring)
                        seq_puts(seq, "local\n");
                else {
                        seq_printf(seq, "%04X", ntohs(entry->rcf));

                        rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) - 2;
                        if (rcf_len)
                                rcf_len >>= 1;
                        for (j = 1; j < rcf_len; j++) {
                                if (j == 1) {
                                        segment = ntohs(entry->rseg[j - 1]) >> 4;
                                        seq_printf(seq, " %03X", segment);
                                }

                                segment = ntohs(entry->rseg[j]) >> 4;
                                brdgnmb = ntohs(entry->rseg[j - 1]) & 0x00f;
                                seq_printf(seq, "-%01X-%03X", brdgnmb, segment);
                        }
                        seq_putc(seq, '\n');
                }

                if (dev)
                        dev_put(dev);   /* drop the reference taken by dev_get_by_index() */
        }
        return 0;
}
static struct seq_operations rif_seq_ops = {
        .start = rif_seq_start,
        .next  = rif_seq_next,
        .stop  = rif_seq_stop,
        .show  = rif_seq_show,
};

static int rif_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &rif_seq_ops);
}

static struct file_operations rif_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = rif_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

#endif
static void tr_setup(struct net_device *dev)
{
        /*
         * Configure and register
         */
        dev->hard_header      = tr_header;
        dev->rebuild_header   = tr_rebuild_header;

        dev->type             = ARPHRD_IEEE802_TR;
        dev->hard_header_len  = TR_HLEN;
        dev->mtu              = 2000;
        dev->addr_len         = TR_ALEN;
        dev->tx_queue_len     = 100;    /* Long queues on tr */

        memset(dev->broadcast, 0xFF, TR_ALEN);

        /* New-style flags. */
        dev->flags            = IFF_BROADCAST | IFF_MULTICAST;
}
/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
        return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}
/*
 * Called during bootup.  We don't actually have to initialise
 * too much for this.
 */
static int __init rif_init(void)
{
        init_timer(&rif_timer);
        rif_timer.expires  = sysctl_tr_rif_timeout;
        rif_timer.data     = 0L;
        rif_timer.function = rif_check_expire;
        add_timer(&rif_timer);

        proc_net_fops_create("tr_rif", S_IRUGO, &rif_seq_fops);
        return 0;
}

module_init(rif_init);

EXPORT_SYMBOL(tr_source_route);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);