bond_alb.c

/*
 * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 */
//#define BONDING_DEBUG 1

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pkt_sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_bonding.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/ipx.h>
#include <net/arp.h>
#include <asm/byteorder.h>
#include "bonding.h"
#include "bond_alb.h"
#define ALB_TIMER_TICKS_PER_SEC	    10	/* should be a divisor of HZ */
#define BOND_TLB_REBALANCE_INTERVAL 10	/* In seconds, periodic re-balancing.
					 * Used for division - never set
					 * to zero !!!
					 */
#define BOND_ALB_LP_INTERVAL	    1	/* In seconds, periodic send of
					 * learning packets to the switch
					 */

#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
				  * ALB_TIMER_TICKS_PER_SEC)

#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
			   * ALB_TIMER_TICKS_PER_SEC)

#define TLB_HASH_TABLE_SIZE 256	/* The size of the clients hash table.
				 * Note that this value MUST NOT be smaller
				 * because the key hash table is BYTE wide !
				 */

#define TLB_NULL_INDEX		0xffffffff
#define MAX_LP_BURST		3

/* rlb defs */
#define RLB_HASH_TABLE_SIZE	256
#define RLB_NULL_INDEX		0xffffffff
#define RLB_UPDATE_DELAY	2*ALB_TIMER_TICKS_PER_SEC /* 2 seconds */
#define RLB_ARP_BURST_SIZE	2
#define RLB_UPDATE_RETRY	3 /* 3-ticks - must be smaller than the rlb
				   * rebalance interval (5 min).
				   */
/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
 * promiscuous after failover
 */
#define RLB_PROMISC_TIMEOUT	10*ALB_TIMER_TICKS_PER_SEC

static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
#pragma pack(1)
struct learning_pkt {
	u8 mac_dst[ETH_ALEN];
	u8 mac_src[ETH_ALEN];
	__be16 type;
	u8 padding[ETH_ZLEN - ETH_HLEN];
};

struct arp_pkt {
	__be16	hw_addr_space;
	__be16	prot_addr_space;
	u8	hw_addr_len;
	u8	prot_addr_len;
	__be16	op_code;
	u8	mac_src[ETH_ALEN];	/* sender hardware address */
	__be32	ip_src;			/* sender IP address */
	u8	mac_dst[ETH_ALEN];	/* target hardware address */
	__be32	ip_dst;			/* target IP address */
};
#pragma pack()
static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
{
	return (struct arp_pkt *)skb_network_header(skb);
}

/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
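
/* XOR all bytes of the key together. The result is a single byte that
 * doubles as the index into the 256-entry TLB/RLB hash tables below.
 */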
static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
{
	int i;
	u8 hash = 0;

	for (i = 0; i < hash_size; i++) {
		hash ^= hash_start[i];
	}

	return hash;
}
/*********************** tlb specific functions ***************************/

static inline void _lock_tx_hashtbl(struct bonding *bond)
{
	spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}

static inline void _unlock_tx_hashtbl(struct bonding *bond)
{
	spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}

/* Caller must hold tx_hashtbl lock */
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
	if (save_load) {
		entry->load_history = 1 + entry->tx_bytes /
				      BOND_TLB_REBALANCE_INTERVAL;
		entry->tx_bytes = 0;
	}

	entry->tx_slave = NULL;
	entry->next = TLB_NULL_INDEX;
	entry->prev = TLB_NULL_INDEX;
}

static inline void tlb_init_slave(struct slave *slave)
{
	SLAVE_TLB_INFO(slave).load = 0;
	SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}

/* Caller must hold bond lock for read */
static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
{
	struct tlb_client_info *tx_hash_table;
	u32 index;

	_lock_tx_hashtbl(bond);

	/* clear slave from tx_hashtbl */
	tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
	index = SLAVE_TLB_INFO(slave).head;
	while (index != TLB_NULL_INDEX) {
		u32 next_index = tx_hash_table[index].next;
		tlb_init_table_entry(&tx_hash_table[index], save_load);
		index = next_index;
	}

	tlb_init_slave(slave);

	_unlock_tx_hashtbl(bond);
}
/* Must be called before starting the monitor timer */
static int tlb_initialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
	struct tlb_client_info *new_hashtbl;
	int i;

	spin_lock_init(&(bond_info->tx_hashtbl_lock));

	new_hashtbl = kzalloc(size, GFP_KERNEL);
	if (!new_hashtbl) {
		printk(KERN_ERR DRV_NAME
		       ": %s: Error: Failed to allocate TLB hash table\n",
		       bond->dev->name);
		return -1;
	}
	_lock_tx_hashtbl(bond);

	bond_info->tx_hashtbl = new_hashtbl;

	for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
	}

	_unlock_tx_hashtbl(bond);

	return 0;
}

/* Must be called only after all slaves have been released */
static void tlb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	_lock_tx_hashtbl(bond);

	kfree(bond_info->tx_hashtbl);
	bond_info->tx_hashtbl = NULL;

	_unlock_tx_hashtbl(bond);
}
/* Caller must hold bond lock for read */
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
	struct slave *slave, *least_loaded;
	s64 max_gap;
	int i, found = 0;

	/* Find the first enabled slave */
	bond_for_each_slave(bond, slave, i) {
		if (SLAVE_IS_OK(slave)) {
			found = 1;
			break;
		}
	}

	if (!found) {
		return NULL;
	}

	least_loaded = slave;
	max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */
			(s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */

	/* Find the slave with the largest gap */
	bond_for_each_slave_from(bond, slave, i, least_loaded) {
		if (SLAVE_IS_OK(slave)) {
			s64 gap = (s64)(slave->speed << 20) -
					(s64)(SLAVE_TLB_INFO(slave).load << 3);
			if (max_gap < gap) {
				least_loaded = slave;
				max_gap = gap;
			}
		}
	}

	return least_loaded;
}
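
/* Map a hash index to a transmit slave. If the table entry is still
 * unassigned, pick the least loaded slave and link the entry into that
 * slave's chain so it can be torn down quickly in tlb_clear_slave().
 */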
/* Caller must hold bond lock for read */
static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct tlb_client_info *hash_table;
	struct slave *assigned_slave;

	_lock_tx_hashtbl(bond);

	hash_table = bond_info->tx_hashtbl;
	assigned_slave = hash_table[hash_index].tx_slave;
	if (!assigned_slave) {
		assigned_slave = tlb_get_least_loaded_slave(bond);

		if (assigned_slave) {
			struct tlb_slave_info *slave_info =
				&(SLAVE_TLB_INFO(assigned_slave));
			u32 next_index = slave_info->head;

			hash_table[hash_index].tx_slave = assigned_slave;
			hash_table[hash_index].next = next_index;
			hash_table[hash_index].prev = TLB_NULL_INDEX;

			if (next_index != TLB_NULL_INDEX) {
				hash_table[next_index].prev = hash_index;
			}

			slave_info->head = hash_index;
			slave_info->load +=
				hash_table[hash_index].load_history;
		}
	}

	if (assigned_slave) {
		hash_table[hash_index].tx_bytes += skb_len;
	}

	_unlock_tx_hashtbl(bond);

	return assigned_slave;
}
/*********************** rlb specific functions ***************************/

static inline void _lock_rx_hashtbl(struct bonding *bond)
{
	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

static inline void _unlock_rx_hashtbl(struct bonding *bond)
{
	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

/* when an ARP REPLY is received from a client update its info
 * in the rx_hashtbl
 */
static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	_lock_rx_hashtbl(bond);

	hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if ((client_info->assigned) &&
	    (client_info->ip_src == arp->ip_dst) &&
	    (client_info->ip_dst == arp->ip_src)) {
		/* update the clients MAC address */
		memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
		client_info->ntt = 1;
		bond_info->rx_ntt = 1;
	}

	_unlock_rx_hashtbl(bond);
}
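
/* Packet handler registered for ETH_P_ARP by rlb_initialize(). ARP replies
 * received on the bond refresh the matching rx_hashtbl entry with the
 * client's real unicast MAC address.
 */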
static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev)
{
	struct bonding *bond = bond_dev->priv;
	struct arp_pkt *arp = (struct arp_pkt *)skb->data;
	int res = NET_RX_DROP;

	if (bond_dev->nd_net != &init_net)
		goto out;

	if (!(bond_dev->flags & IFF_MASTER))
		goto out;

	if (!arp) {
		dprintk("Packet has no ARP data\n");
		goto out;
	}

	if (skb->len < sizeof(struct arp_pkt)) {
		dprintk("Packet is too small to be an ARP\n");
		goto out;
	}

	if (arp->op_code == htons(ARPOP_REPLY)) {
		/* update rx hash table for this ARP */
		rlb_update_entry_from_arp(bond, arp);
		dprintk("Server received an ARP Reply from client\n");
	}

	res = NET_RX_SUCCESS;

out:
	dev_kfree_skb(skb);

	return res;
}
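
/* Round-robin starting point is remembered in next_rx_slave; among the
 * usable slaves from that point on, the one with the highest reported
 * speed is preferred.
 */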
/* Caller must hold bond lock for read */
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *rx_slave, *slave, *start_at;
	int i = 0;

	if (bond_info->next_rx_slave) {
		start_at = bond_info->next_rx_slave;
	} else {
		start_at = bond->first_slave;
	}

	rx_slave = NULL;

	bond_for_each_slave_from(bond, slave, i, start_at) {
		if (SLAVE_IS_OK(slave)) {
			if (!rx_slave) {
				rx_slave = slave;
			} else if (slave->speed > rx_slave->speed) {
				rx_slave = slave;
			}
		}
	}

	if (rx_slave) {
		bond_info->next_rx_slave = rx_slave->next;
	}

	return rx_slave;
}
/* teach the switch the mac of a disabled slave
 * on the primary for fault tolerance
 *
 * Caller must hold bond->curr_slave_lock for write or bond lock for write
 */
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
	if (!bond->curr_active_slave) {
		return;
	}

	if (!bond->alb_info.primary_is_promisc) {
		bond->alb_info.primary_is_promisc = 1;
		dev_set_promiscuity(bond->curr_active_slave->dev, 1);
	}

	bond->alb_info.rlb_promisc_timeout_counter = 0;

	alb_send_learning_packets(bond->curr_active_slave, addr);
}
/* slave being removed should not be active at this point
 *
 * Caller must hold bond lock for read
 */
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *rx_hash_table;
	u32 index, next_index;

	/* clear slave from rx_hashtbl */
	_lock_rx_hashtbl(bond);

	rx_hash_table = bond_info->rx_hashtbl;
	index = bond_info->rx_hashtbl_head;
	for (; index != RLB_NULL_INDEX; index = next_index) {
		next_index = rx_hash_table[index].next;
		if (rx_hash_table[index].slave == slave) {
			struct slave *assigned_slave = rlb_next_rx_slave(bond);

			if (assigned_slave) {
				rx_hash_table[index].slave = assigned_slave;
				if (memcmp(rx_hash_table[index].mac_dst,
					   mac_bcast, ETH_ALEN)) {
					bond_info->rx_hashtbl[index].ntt = 1;
					bond_info->rx_ntt = 1;
					/* A slave has been removed from the
					 * table because it is either disabled
					 * or being released. We must retry the
					 * update to keep clients from being
					 * left un-updated and disconnecting
					 * under stress.
					 */
					bond_info->rlb_update_retry_counter =
						RLB_UPDATE_RETRY;
				}
			} else {  /* there is no active slave */
				rx_hash_table[index].slave = NULL;
			}
		}
	}

	_unlock_rx_hashtbl(bond);

	write_lock_bh(&bond->curr_slave_lock);

	if (slave != bond->curr_active_slave) {
		rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
	}

	write_unlock_bh(&bond->curr_slave_lock);
}
static void rlb_update_client(struct rlb_client_info *client_info)
{
	int i;

	if (!client_info->slave) {
		return;
	}

	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
		struct sk_buff *skb;

		skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
				 client_info->ip_dst,
				 client_info->slave->dev,
				 client_info->ip_src,
				 client_info->mac_dst,
				 client_info->slave->dev->dev_addr,
				 client_info->mac_dst);

		if (!skb) {
			printk(KERN_ERR DRV_NAME
			       ": %s: Error: failed to create an ARP packet\n",
			       client_info->slave->dev->master->name);
			continue;
		}

		skb->dev = client_info->slave->dev;

		if (client_info->tag) {
			skb = vlan_put_tag(skb, client_info->vlan_id);
			if (!skb) {
				printk(KERN_ERR DRV_NAME
				       ": %s: Error: failed to insert VLAN tag\n",
				       client_info->slave->dev->master->name);
				continue;
			}
		}

		arp_xmit(skb);
	}
}
/* sends ARP REPLIES that update the clients that need updating */
static void rlb_update_rx_clients(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	_lock_rx_hashtbl(bond);

	hash_index = bond_info->rx_hashtbl_head;
	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);
		if (client_info->ntt) {
			rlb_update_client(client_info);
			if (bond_info->rlb_update_retry_counter == 0) {
				client_info->ntt = 0;
			}
		}
	}

	/* do not update the entries again until this counter is zero, so as
	 * not to confuse the clients.
	 */
	bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;

	_unlock_rx_hashtbl(bond);
}
/* The slave was assigned a new mac address - update the clients */
static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	int ntt = 0;
	u32 hash_index;

	_lock_rx_hashtbl(bond);

	hash_index = bond_info->rx_hashtbl_head;
	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);

		if ((client_info->slave == slave) &&
		    memcmp(client_info->mac_dst, mac_bcast, ETH_ALEN)) {
			client_info->ntt = 1;
			ntt = 1;
		}
	}

	/* update the team's flag only after the whole iteration */
	if (ntt) {
		bond_info->rx_ntt = 1;
		/* fasten the change */
		bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
	}

	_unlock_rx_hashtbl(bond);
}
/* mark all clients using src_ip to be updated */
static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	_lock_rx_hashtbl(bond);

	hash_index = bond_info->rx_hashtbl_head;
	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);

		if (!client_info->slave) {
			printk(KERN_ERR DRV_NAME
			       ": %s: Error: found a client with no channel in "
			       "the client's hash table\n",
			       bond->dev->name);
			continue;
		}
		/* update all clients using this src_ip, that are not assigned
		 * to the team's address (curr_active_slave) and have a known
		 * unicast mac address.
		 */
		if ((client_info->ip_src == src_ip) &&
		    memcmp(client_info->slave->dev->dev_addr,
			   bond->dev->dev_addr, ETH_ALEN) &&
		    memcmp(client_info->mac_dst, mac_bcast, ETH_ALEN)) {
			client_info->ntt = 1;
			bond_info->rx_ntt = 1;
		}
	}

	_unlock_rx_hashtbl(bond);
}
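
/* Select (and record) the receive slave for the client addressed by this
 * ARP packet. The destination IP is hashed into rx_hashtbl; a fresh entry
 * is assigned to the slave returned by rlb_next_rx_slave() and linked into
 * the table's list of active entries.
 */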
/* Caller must hold both bond and ptr locks for read */
static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct arp_pkt *arp = arp_pkt(skb);
	struct slave *assigned_slave;
	struct rlb_client_info *client_info;
	u32 hash_index = 0;

	_lock_rx_hashtbl(bond);

	hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if (client_info->assigned) {
		if ((client_info->ip_src == arp->ip_src) &&
		    (client_info->ip_dst == arp->ip_dst)) {
			/* the entry is already assigned to this client */
			if (memcmp(arp->mac_dst, mac_bcast, ETH_ALEN)) {
				/* update mac address from arp */
				memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
			}

			assigned_slave = client_info->slave;
			if (assigned_slave) {
				_unlock_rx_hashtbl(bond);
				return assigned_slave;
			}
		} else {
			/* the entry is already assigned to some other client,
			 * move the old client to primary (curr_active_slave) so
			 * that the new client can be assigned to this entry.
			 */
			if (bond->curr_active_slave &&
			    client_info->slave != bond->curr_active_slave) {
				client_info->slave = bond->curr_active_slave;
				rlb_update_client(client_info);
			}
		}
	}
	/* assign a new slave */
	assigned_slave = rlb_next_rx_slave(bond);

	if (assigned_slave) {
		client_info->ip_src = arp->ip_src;
		client_info->ip_dst = arp->ip_dst;
		/* arp->mac_dst is broadcast for arp requests.
		 * will be updated with clients actual unicast mac address
		 * upon receiving an arp reply.
		 */
		memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
		client_info->slave = assigned_slave;

		if (memcmp(client_info->mac_dst, mac_bcast, ETH_ALEN)) {
			client_info->ntt = 1;
			bond->alb_info.rx_ntt = 1;
		} else {
			client_info->ntt = 0;
		}

		if (!list_empty(&bond->vlan_list)) {
			unsigned short vlan_id;
			int res = vlan_get_tag(skb, &vlan_id);
			if (!res) {
				client_info->tag = 1;
				client_info->vlan_id = vlan_id;
			}
		}

		if (!client_info->assigned) {
			u32 prev_tbl_head = bond_info->rx_hashtbl_head;
			bond_info->rx_hashtbl_head = hash_index;
			client_info->next = prev_tbl_head;
			if (prev_tbl_head != RLB_NULL_INDEX) {
				bond_info->rx_hashtbl[prev_tbl_head].prev =
					hash_index;
			}
			client_info->assigned = 1;
		}
	}

	_unlock_rx_hashtbl(bond);

	return assigned_slave;
}
/* chooses (and returns) transmit channel for arp reply
 * does not choose channel for other arp types since they are
 * sent on the curr_active_slave
 */
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
	struct arp_pkt *arp = arp_pkt(skb);
	struct slave *tx_slave = NULL;

	if (arp->op_code == __constant_htons(ARPOP_REPLY)) {
		/* the arp must be sent on the selected
		 * rx channel
		 */
		tx_slave = rlb_choose_channel(skb, bond);
		if (tx_slave) {
			memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
		}
		dprintk("Server sent ARP Reply packet\n");
	} else if (arp->op_code == __constant_htons(ARPOP_REQUEST)) {
		/* Create an entry in the rx_hashtbl for this client as a
		 * placeholder.
		 * When the arp reply is received the entry will be updated
		 * with the correct unicast address of the client.
		 */
		rlb_choose_channel(skb, bond);

		/* The ARP reply packets must be delayed so that
		 * they can cancel out the influence of the ARP request.
		 */
		bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;

		/* arp requests are broadcast and are sent on the primary
		 * the arp request will collapse all clients on the subnet to
		 * the primary slave. We must register these clients to be
		 * updated with their assigned mac.
		 */
		rlb_req_update_subnet_clients(bond, arp->ip_src);
		dprintk("Server sent ARP Request packet\n");
	}

	return tx_slave;
}
/* Caller must hold bond lock for read */
static void rlb_rebalance(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *assigned_slave;
	struct rlb_client_info *client_info;
	int ntt;
	u32 hash_index;

	_lock_rx_hashtbl(bond);

	ntt = 0;
	hash_index = bond_info->rx_hashtbl_head;
	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);
		assigned_slave = rlb_next_rx_slave(bond);
		if (assigned_slave && (client_info->slave != assigned_slave)) {
			client_info->slave = assigned_slave;
			client_info->ntt = 1;
			ntt = 1;
		}
	}

	/* update the team's flag only after the whole iteration */
	if (ntt) {
		bond_info->rx_ntt = 1;
	}

	_unlock_rx_hashtbl(bond);
}
/* Caller must hold rx_hashtbl lock */
static void rlb_init_table_entry(struct rlb_client_info *entry)
{
	memset(entry, 0, sizeof(struct rlb_client_info));
	entry->next = RLB_NULL_INDEX;
	entry->prev = RLB_NULL_INDEX;
}

static int rlb_initialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct packet_type *pk_type = &(BOND_ALB_INFO(bond).rlb_pkt_type);
	struct rlb_client_info *new_hashtbl;
	int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
	int i;

	spin_lock_init(&(bond_info->rx_hashtbl_lock));

	new_hashtbl = kmalloc(size, GFP_KERNEL);
	if (!new_hashtbl) {
		printk(KERN_ERR DRV_NAME
		       ": %s: Error: Failed to allocate RLB hash table\n",
		       bond->dev->name);
		return -1;
	}
	_lock_rx_hashtbl(bond);

	bond_info->rx_hashtbl = new_hashtbl;

	bond_info->rx_hashtbl_head = RLB_NULL_INDEX;

	for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
		rlb_init_table_entry(bond_info->rx_hashtbl + i);
	}

	_unlock_rx_hashtbl(bond);

	/* initialize packet type */
	pk_type->type = __constant_htons(ETH_P_ARP);
	pk_type->dev = bond->dev;
	pk_type->func = rlb_arp_recv;

	/* register to receive ARPs */
	dev_add_pack(pk_type);

	return 0;
}
static void rlb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	dev_remove_pack(&(bond_info->rlb_pkt_type));

	_lock_rx_hashtbl(bond);

	kfree(bond_info->rx_hashtbl);
	bond_info->rx_hashtbl = NULL;
	bond_info->rx_hashtbl_head = RLB_NULL_INDEX;

	_unlock_rx_hashtbl(bond);
}

static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 curr_index;

	_lock_rx_hashtbl(bond);

	curr_index = bond_info->rx_hashtbl_head;
	while (curr_index != RLB_NULL_INDEX) {
		struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
		u32 next_index = bond_info->rx_hashtbl[curr_index].next;
		u32 prev_index = bond_info->rx_hashtbl[curr_index].prev;

		if (curr->tag && (curr->vlan_id == vlan_id)) {
			if (curr_index == bond_info->rx_hashtbl_head) {
				bond_info->rx_hashtbl_head = next_index;
			}
			if (prev_index != RLB_NULL_INDEX) {
				bond_info->rx_hashtbl[prev_index].next = next_index;
			}
			if (next_index != RLB_NULL_INDEX) {
				bond_info->rx_hashtbl[next_index].prev = prev_index;
			}

			rlb_init_table_entry(curr);
		}

		curr_index = next_index;
	}

	_unlock_rx_hashtbl(bond);
}

/*********************** tlb/rlb shared functions *********************/
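
/* Send a burst of MAX_LP_BURST learning packets (ETH_P_LOOP frames with
 * both source and destination set to mac_addr) on the given slave, so the
 * attached switch (re)learns which port that MAC lives on. When VLANs are
 * configured, each packet of the burst is tagged with the next VLAN in
 * round-robin order.
 */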
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
{
	struct bonding *bond = bond_get_bond_by_slave(slave);
	struct learning_pkt pkt;
	int size = sizeof(struct learning_pkt);
	int i;

	memset(&pkt, 0, size);
	memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
	memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
	pkt.type = __constant_htons(ETH_P_LOOP);

	for (i = 0; i < MAX_LP_BURST; i++) {
		struct sk_buff *skb;
		char *data;

		skb = dev_alloc_skb(size);
		if (!skb) {
			return;
		}

		data = skb_put(skb, size);
		memcpy(data, &pkt, size);

		skb_reset_mac_header(skb);
		skb->network_header = skb->mac_header + ETH_HLEN;
		skb->protocol = pkt.type;
		skb->priority = TC_PRIO_CONTROL;
		skb->dev = slave->dev;

		if (!list_empty(&bond->vlan_list)) {
			struct vlan_entry *vlan;

			vlan = bond_next_vlan(bond,
					      bond->alb_info.current_alb_vlan);

			bond->alb_info.current_alb_vlan = vlan;
			if (!vlan) {
				kfree_skb(skb);
				continue;
			}

			skb = vlan_put_tag(skb, vlan->vlan_id);
			if (!skb) {
				printk(KERN_ERR DRV_NAME
				       ": %s: Error: failed to insert VLAN tag\n",
				       bond->dev->name);
				continue;
			}
		}

		dev_queue_xmit(skb);
	}
}
/* hw is a boolean parameter that determines whether we should try and
 * set the hw address of the device as well as the hw address of the
 * net_device
 */
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
{
	struct net_device *dev = slave->dev;
	struct sockaddr s_addr;

	if (!hw) {
		memcpy(dev->dev_addr, addr, dev->addr_len);
		return 0;
	}

	/* for rlb each slave must have a unique hw mac address so that
	 * each slave will receive packets destined to a different mac
	 */
	memcpy(s_addr.sa_data, addr, dev->addr_len);
	s_addr.sa_family = dev->type;
	if (dev_set_mac_address(dev, &s_addr)) {
		printk(KERN_ERR DRV_NAME
		       ": %s: Error: dev_set_mac_address of dev %s failed! ALB "
		       "mode requires that the base driver support setting "
		       "the hw address also when the network device's "
		       "interface is open\n",
		       dev->master->name, dev->name);
		return -EOPNOTSUPP;
	}
	return 0;
}
/*
 * Swap MAC addresses between two slaves.
 *
 * Called with RTNL held, and no other locks.
 */
static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2)
{
	u8 tmp_mac_addr[ETH_ALEN];

	memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
	alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
}
/*
 * Send learning packets after MAC address swap.
 *
 * Called with RTNL and bond->lock held for read.
 */
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
				struct slave *slave2)
{
	int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
	struct slave *disabled_slave = NULL;

	/* fasten the change in the switch */
	if (SLAVE_IS_OK(slave1)) {
		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform the clients that the mac address
			 * has changed
			 */
			rlb_req_update_slave_clients(bond, slave1);
		}
	} else {
		disabled_slave = slave1;
	}

	if (SLAVE_IS_OK(slave2)) {
		alb_send_learning_packets(slave2, slave2->dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform the clients that the mac address
			 * has changed
			 */
			rlb_req_update_slave_clients(bond, slave2);
		}
	} else {
		disabled_slave = slave2;
	}

	if (bond->alb_info.rlb_enabled && slaves_state_differ) {
		/* A disabled slave was assigned an active mac addr */
		rlb_teach_disabled_mac_on_primary(bond,
						  disabled_slave->dev->dev_addr);
	}
}
/**
 * alb_change_hw_addr_on_detach
 * @bond: bonding we're working on
 * @slave: the slave that was just detached
 *
 * We assume that @slave was already detached from the slave list.
 *
 * If @slave's permanent hw address is different both from its current
 * address and from @bond's address, then somewhere in the bond there's
 * a slave that has @slave's permanent address as its current address.
 * We'll make sure that slave no longer uses @slave's permanent address.
 *
 * Caller must hold bond lock
 */
static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
{
	int perm_curr_diff;
	int perm_bond_diff;

	perm_curr_diff = memcmp(slave->perm_hwaddr,
				slave->dev->dev_addr,
				ETH_ALEN);
	perm_bond_diff = memcmp(slave->perm_hwaddr,
				bond->dev->dev_addr,
				ETH_ALEN);

	if (perm_curr_diff && perm_bond_diff) {
		struct slave *tmp_slave;
		int i, found = 0;

		bond_for_each_slave(bond, tmp_slave, i) {
			if (!memcmp(slave->perm_hwaddr,
				    tmp_slave->dev->dev_addr,
				    ETH_ALEN)) {
				found = 1;
				break;
			}
		}

		if (found) {
			/* locking: needs RTNL and nothing else */
			alb_swap_mac_addr(bond, slave, tmp_slave);
			alb_fasten_mac_swap(bond, slave, tmp_slave);
		}
	}
}
/**
 * alb_handle_addr_collision_on_attach
 * @bond: bonding we're working on
 * @slave: the slave that was just attached
 *
 * checks uniqueness of slave's mac address and handles the case where the
 * new slave uses the bond's mac address.
 *
 * If the permanent hw address of @slave is @bond's hw address, we need to
 * find a different hw address to give @slave, that isn't in use by any other
 * slave in the bond. This address must be, of course, one of the permanent
 * addresses of the other slaves.
 *
 * We go over the slave list, and for each slave there we compare its
 * permanent hw address with the current address of all the other slaves.
 * If no match was found, then we've found a slave with a permanent address
 * that isn't used by any other slave in the bond, so we can assign it to
 * @slave.
 *
 * assumption: this function is called before @slave is attached to the
 * bond slave list.
 *
 * caller must hold the bond lock for write since the mac addresses are compared
 * and may be swapped.
 */
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
	struct slave *tmp_slave1, *tmp_slave2, *free_mac_slave;
	struct slave *has_bond_addr = bond->curr_active_slave;
	int i, j, found = 0;

	if (bond->slave_cnt == 0) {
		/* this is the first slave */
		return 0;
	}

	/* if slave's mac address differs from bond's mac address
	 * check uniqueness of slave's mac address against the other
	 * slaves in the bond.
	 */
	if (memcmp(slave->perm_hwaddr, bond->dev->dev_addr, ETH_ALEN)) {
		bond_for_each_slave(bond, tmp_slave1, i) {
			if (!memcmp(tmp_slave1->dev->dev_addr, slave->dev->dev_addr,
				    ETH_ALEN)) {
				found = 1;
				break;
			}
		}

		if (!found)
			return 0;

		/* Try setting slave mac to bond address and fall-through
		   to code handling that situation below... */
		alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
				       bond->alb_info.rlb_enabled);
	}

	/* The slave's address is equal to the address of the bond.
	 * Search for a spare address in the bond for this slave.
	 */
	free_mac_slave = NULL;

	bond_for_each_slave(bond, tmp_slave1, i) {
		found = 0;
		bond_for_each_slave(bond, tmp_slave2, j) {
			if (!memcmp(tmp_slave1->perm_hwaddr,
				    tmp_slave2->dev->dev_addr,
				    ETH_ALEN)) {
				found = 1;
				break;
			}
		}

		if (!found) {
			/* no slave has tmp_slave1's perm addr
			 * as its curr addr
			 */
			free_mac_slave = tmp_slave1;
			break;
		}

		if (!has_bond_addr) {
			if (!memcmp(tmp_slave1->dev->dev_addr,
				    bond->dev->dev_addr,
				    ETH_ALEN)) {
				has_bond_addr = tmp_slave1;
			}
		}
	}

	if (free_mac_slave) {
		alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
				       bond->alb_info.rlb_enabled);

		printk(KERN_WARNING DRV_NAME
		       ": %s: Warning: the hw address of slave %s is in use by "
		       "the bond; giving it the hw address of %s\n",
		       bond->dev->name, slave->dev->name, free_mac_slave->dev->name);

	} else if (has_bond_addr) {
		printk(KERN_ERR DRV_NAME
		       ": %s: Error: the hw address of slave %s is in use by the "
		       "bond; couldn't find a slave with a free hw address to "
		       "give it (this should not have happened)\n",
		       bond->dev->name, slave->dev->name);
		return -EFAULT;
	}

	return 0;
}
/**
 * alb_set_mac_address
 * @bond: bonding we're working on
 * @addr: MAC address to set
 *
 * In TLB mode all slaves are configured to the bond's hw address, but set
 * their dev_addr field to different addresses (based on their permanent hw
 * addresses).
 *
 * For each slave, this function sets the interface to the new address and then
 * changes its dev_addr field to its previous value.
 *
 * Unwinding assumes bond's mac address has not yet changed.
 */
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
	struct sockaddr sa;
	struct slave *slave, *stop_at;
	char tmp_addr[ETH_ALEN];
	int res;
	int i;

	if (bond->alb_info.rlb_enabled) {
		return 0;
	}

	bond_for_each_slave(bond, slave, i) {
		if (slave->dev->set_mac_address == NULL) {
			res = -EOPNOTSUPP;
			goto unwind;
		}

		/* save net_device's current hw address */
		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);

		res = dev_set_mac_address(slave->dev, addr);

		/* restore net_device's hw address */
		memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);

		if (res) {
			goto unwind;
		}
	}

	return 0;

unwind:
	memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
	sa.sa_family = bond->dev->type;

	/* unwind from head to the slave that failed */
	stop_at = slave;
	bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
		dev_set_mac_address(slave->dev, &sa);
		memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
	}

	return res;
}
/************************ exported alb functions ************************/

int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
{
	int res;

	res = tlb_initialize(bond);
	if (res) {
		return res;
	}

	if (rlb_enabled) {
		bond->alb_info.rlb_enabled = 1;
		/* initialize rlb */
		res = rlb_initialize(bond);
		if (res) {
			tlb_deinitialize(bond);
			return res;
		}
	} else {
		bond->alb_info.rlb_enabled = 0;
	}

	return 0;
}

void bond_alb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	tlb_deinitialize(bond);

	if (bond_info->rlb_enabled) {
		rlb_deinitialize(bond);
	}
}
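
/* Main transmit hook for balance-tlb/balance-alb. IPv4, IPv6 and IPX NCP
 * frames are balanced by hashing a destination field into the TLB table;
 * ARP is handed to the RLB code when enabled; everything else (including
 * broadcast and IGMP traffic) is sent on curr_active_slave.
 */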
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
	struct bonding *bond = bond_dev->priv;
	struct ethhdr *eth_data;
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *tx_slave = NULL;
	static const __be32 ip_bcast = htonl(0xffffffff);
	int hash_size = 0;
	int do_tx_balance = 1;
	u32 hash_index = 0;
	const u8 *hash_start = NULL;
	int res = 1;

	skb_reset_mac_header(skb);
	eth_data = eth_hdr(skb);

	/* make sure that the curr_active_slave and the slaves list do
	 * not change during tx
	 */
	read_lock(&bond->lock);
	read_lock(&bond->curr_slave_lock);

	if (!BOND_IS_OK(bond)) {
		goto out;
	}

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP: {
		const struct iphdr *iph = ip_hdr(skb);

		if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) ||
		    (iph->daddr == ip_bcast) ||
		    (iph->protocol == IPPROTO_IGMP)) {
			do_tx_balance = 0;
			break;
		}
		hash_start = (char *)&(iph->daddr);
		hash_size = sizeof(iph->daddr);
	}
		break;
	case ETH_P_IPV6:
		if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) {
			do_tx_balance = 0;
			break;
		}

		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
		hash_size = sizeof(ipv6_hdr(skb)->daddr);
		break;
	case ETH_P_IPX:
		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
			/* something is wrong with this packet */
			do_tx_balance = 0;
			break;
		}

		if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
			/* The only protocol worth balancing in
			 * this family since it has an "ARP" like
			 * mechanism
			 */
			do_tx_balance = 0;
			break;
		}

		hash_start = (char*)eth_data->h_dest;
		hash_size = ETH_ALEN;
		break;
	case ETH_P_ARP:
		do_tx_balance = 0;
		if (bond_info->rlb_enabled) {
			tx_slave = rlb_arp_xmit(skb, bond);
		}
		break;
	default:
		do_tx_balance = 0;
		break;
	}

	if (do_tx_balance) {
		hash_index = _simple_hash(hash_start, hash_size);
		tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
	}

	if (!tx_slave) {
		/* unbalanced or unassigned, send through primary */
		tx_slave = bond->curr_active_slave;
		bond_info->unbalanced_load += skb->len;
	}

	if (tx_slave && SLAVE_IS_OK(tx_slave)) {
		if (tx_slave != bond->curr_active_slave) {
			memcpy(eth_data->h_source,
			       tx_slave->dev->dev_addr,
			       ETH_ALEN);
		}

		res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
	} else {
		if (tx_slave) {
			tlb_clear_slave(bond, tx_slave, 0);
		}
	}

out:
	if (res) {
		/* no suitable interface, frame not sent */
		dev_kfree_skb(skb);
	}
	read_unlock(&bond->curr_slave_lock);
	read_unlock(&bond->lock);
	return 0;
}
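
/* Periodic worker, re-armed every alb_delta_in_ticks. It sends learning
 * packets every BOND_ALB_LP_TICKS, rebalances TLB assignments every
 * BOND_TLB_REBALANCE_TICKS, and (in RLB mode) handles the promiscuity
 * timeout, pending rebalances and delayed client updates.
 */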
void bond_alb_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    alb_work.work);
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *slave;
	int i;

	read_lock(&bond->lock);

	if (bond->kill_timers) {
		goto out;
	}

	if (bond->slave_cnt == 0) {
		bond_info->tx_rebalance_counter = 0;
		bond_info->lp_counter = 0;
		goto re_arm;
	}

	bond_info->tx_rebalance_counter++;
	bond_info->lp_counter++;

	/* send learning packets */
	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) {
		/* change of curr_active_slave involves swapping of mac addresses.
		 * in order to avoid this swapping from happening while
		 * sending the learning packets, the curr_slave_lock must be held for
		 * read.
		 */
		read_lock(&bond->curr_slave_lock);

		bond_for_each_slave(bond, slave, i) {
			alb_send_learning_packets(slave, slave->dev->dev_addr);
		}

		read_unlock(&bond->curr_slave_lock);

		bond_info->lp_counter = 0;
	}

	/* rebalance tx traffic */
	if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {

		read_lock(&bond->curr_slave_lock);

		bond_for_each_slave(bond, slave, i) {
			tlb_clear_slave(bond, slave, 1);
			if (slave == bond->curr_active_slave) {
				SLAVE_TLB_INFO(slave).load =
					bond_info->unbalanced_load /
						BOND_TLB_REBALANCE_INTERVAL;
				bond_info->unbalanced_load = 0;
			}
		}

		read_unlock(&bond->curr_slave_lock);

		bond_info->tx_rebalance_counter = 0;
	}

	/* handle rlb stuff */
	if (bond_info->rlb_enabled) {
		if (bond_info->primary_is_promisc &&
		    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {

			/*
			 * dev_set_promiscuity requires rtnl and
			 * nothing else.
			 */
			read_unlock(&bond->lock);
			rtnl_lock();

			bond_info->rlb_promisc_timeout_counter = 0;

			/* If the primary was set to promiscuous mode
			 * because a slave was disabled then
			 * it can now leave promiscuous mode.
			 */
			dev_set_promiscuity(bond->curr_active_slave->dev, -1);
			bond_info->primary_is_promisc = 0;

			rtnl_unlock();
			read_lock(&bond->lock);
		}

		if (bond_info->rlb_rebalance) {
			bond_info->rlb_rebalance = 0;
			rlb_rebalance(bond);
		}

		/* check if clients need updating */
		if (bond_info->rx_ntt) {
			if (bond_info->rlb_update_delay_counter) {
				--bond_info->rlb_update_delay_counter;
			} else {
				rlb_update_rx_clients(bond);
				if (bond_info->rlb_update_retry_counter) {
					--bond_info->rlb_update_retry_counter;
				} else {
					bond_info->rx_ntt = 0;
				}
			}
		}
	}

re_arm:
	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
out:
	read_unlock(&bond->lock);
}
/* assumption: called before the slave is attached to the bond
 * and not locked by the bond lock
 */
int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
	int res;

	res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
				     bond->alb_info.rlb_enabled);
	if (res) {
		return res;
	}

	/* caller must hold the bond lock for write since the mac addresses
	 * are compared and may be swapped.
	 */
	read_lock(&bond->lock);

	res = alb_handle_addr_collision_on_attach(bond, slave);

	read_unlock(&bond->lock);

	if (res) {
		return res;
	}

	tlb_init_slave(slave);

	/* order a rebalance ASAP */
	bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;

	if (bond->alb_info.rlb_enabled) {
		bond->alb_info.rlb_rebalance = 1;
	}

	return 0;
}
/* Caller must hold bond lock for write */
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
	if (bond->slave_cnt > 1) {
		alb_change_hw_addr_on_detach(bond, slave);
	}

	tlb_clear_slave(bond, slave, 0);

	if (bond->alb_info.rlb_enabled) {
		bond->alb_info.next_rx_slave = NULL;
		rlb_clear_slave(bond, slave);
	}
}
/* Caller must hold bond lock for read */
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	if (link == BOND_LINK_DOWN) {
		tlb_clear_slave(bond, slave, 0);
		if (bond->alb_info.rlb_enabled) {
			rlb_clear_slave(bond, slave);
		}
	} else if (link == BOND_LINK_UP) {
		/* order a rebalance ASAP */
		bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
		if (bond->alb_info.rlb_enabled) {
			bond->alb_info.rlb_rebalance = 1;
			/* If the updelay module parameter is smaller than the
			 * forwarding delay of the switch the rebalance will
			 * not work because the rebalance arp replies will
			 * not be forwarded to the clients.
			 */
		}
	}
}
/**
 * bond_alb_handle_active_change - assign new curr_active_slave
 * @bond: our bonding struct
 * @new_slave: new slave to assign
 *
 * Set the bond->curr_active_slave to @new_slave and handle
 * mac address swapping and promiscuity changes as needed.
 *
 * If new_slave is NULL, caller must hold curr_slave_lock or
 * bond->lock for write.
 *
 * If new_slave is not NULL, caller must hold RTNL, bond->lock for
 * read and curr_slave_lock for write.  Processing here may sleep, so
 * no other locks may be held.
 */
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
{
	struct slave *swap_slave;
	int i;

	if (new_slave)
		ASSERT_RTNL();

	if (bond->curr_active_slave == new_slave) {
		return;
	}

	if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
		dev_set_promiscuity(bond->curr_active_slave->dev, -1);
		bond->alb_info.primary_is_promisc = 0;
		bond->alb_info.rlb_promisc_timeout_counter = 0;
	}

	swap_slave = bond->curr_active_slave;
	bond->curr_active_slave = new_slave;

	if (!new_slave || (bond->slave_cnt == 0)) {
		return;
	}

	/* set the new curr_active_slave to the bonds mac address
	 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
	 */
	if (!swap_slave) {
		struct slave *tmp_slave;
		/* find slave that is holding the bond's mac address */
		bond_for_each_slave(bond, tmp_slave, i) {
			if (!memcmp(tmp_slave->dev->dev_addr,
				    bond->dev->dev_addr, ETH_ALEN)) {
				swap_slave = tmp_slave;
				break;
			}
		}
	}

	/*
	 * Arrange for swap_slave and new_slave to temporarily be
	 * ignored so we can mess with their MAC addresses without
	 * fear of interference from transmit activity.
	 */
	if (swap_slave) {
		tlb_clear_slave(bond, swap_slave, 1);
	}
	tlb_clear_slave(bond, new_slave, 1);

	write_unlock_bh(&bond->curr_slave_lock);
	read_unlock(&bond->lock);

	/* curr_active_slave must be set before calling alb_swap_mac_addr */
	if (swap_slave) {
		/* swap mac address */
		alb_swap_mac_addr(bond, swap_slave, new_slave);
	} else {
		/* set the new_slave to the bond mac address */
		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
				       bond->alb_info.rlb_enabled);
	}

	read_lock(&bond->lock);

	if (swap_slave) {
		alb_fasten_mac_swap(bond, swap_slave, new_slave);
	} else {
		/* fasten bond mac on new current slave */
		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
	}

	write_lock_bh(&bond->curr_slave_lock);
}
/*
 * Called with RTNL
 */
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
{
	struct bonding *bond = bond_dev->priv;
	struct sockaddr *sa = addr;
	struct slave *slave, *swap_slave;
	int res;
	int i;

	if (!is_valid_ether_addr(sa->sa_data)) {
		return -EADDRNOTAVAIL;
	}

	res = alb_set_mac_address(bond, addr);
	if (res) {
		return res;
	}

	memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);

	/* If there is no curr_active_slave there is nothing else to do.
	 * Otherwise we'll need to pass the new address to it and handle
	 * duplications.
	 */
	if (!bond->curr_active_slave) {
		return 0;
	}

	swap_slave = NULL;

	bond_for_each_slave(bond, slave, i) {
		if (!memcmp(slave->dev->dev_addr, bond_dev->dev_addr, ETH_ALEN)) {
			swap_slave = slave;
			break;
		}
	}

	write_unlock_bh(&bond->curr_slave_lock);
	read_unlock(&bond->lock);

	if (swap_slave) {
		alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
		alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
	} else {
		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
				       bond->alb_info.rlb_enabled);

		alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform clients mac address has changed */
			rlb_req_update_slave_clients(bond, bond->curr_active_slave);
		}
	}

	read_lock(&bond->lock);
	write_lock_bh(&bond->curr_slave_lock);

	return 0;
}
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
	if (bond->alb_info.current_alb_vlan &&
	    (bond->alb_info.current_alb_vlan->vlan_id == vlan_id)) {
		bond->alb_info.current_alb_vlan = NULL;
	}

	if (bond->alb_info.rlb_enabled) {
		rlb_clear_vlan(bond, vlan_id);
	}
}