ibmveth.c

  1. /*
  2. * IBM Power Virtual Ethernet Device Driver
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) IBM Corporation, 2003, 2010
  19. *
  20. * Authors: Dave Larson <larson1@us.ibm.com>
  21. * Santiago Leon <santil@linux.vnet.ibm.com>
  22. * Brian King <brking@linux.vnet.ibm.com>
  23. * Robert Jennings <rcj@linux.vnet.ibm.com>
  24. * Anton Blanchard <anton@au.ibm.com>
  25. */
  26. #include <linux/module.h>
  27. #include <linux/moduleparam.h>
  28. #include <linux/types.h>
  29. #include <linux/errno.h>
  30. #include <linux/dma-mapping.h>
  31. #include <linux/kernel.h>
  32. #include <linux/netdevice.h>
  33. #include <linux/etherdevice.h>
  34. #include <linux/skbuff.h>
  35. #include <linux/init.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/mm.h>
  38. #include <linux/pm.h>
  39. #include <linux/ethtool.h>
  40. #include <linux/in.h>
  41. #include <linux/ip.h>
  42. #include <linux/ipv6.h>
  43. #include <linux/slab.h>
  44. #include <asm/hvcall.h>
  45. #include <linux/atomic.h>
  46. #include <asm/vio.h>
  47. #include <asm/iommu.h>
  48. #include <asm/firmware.h>
  49. #include "ibmveth.h"
  50. static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
  51. static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
  52. static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
  53. static struct kobj_type ktype_veth_pool;
  54. static const char ibmveth_driver_name[] = "ibmveth";
  55. static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
  56. #define ibmveth_driver_version "1.04"
  57. MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
  58. MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
  59. MODULE_LICENSE("GPL");
  60. MODULE_VERSION(ibmveth_driver_version);
  61. static unsigned int tx_copybreak __read_mostly = 128;
  62. module_param(tx_copybreak, uint, 0644);
  63. MODULE_PARM_DESC(tx_copybreak,
  64. "Maximum size of packet that is copied to a new buffer on transmit");
  65. static unsigned int rx_copybreak __read_mostly = 128;
  66. module_param(rx_copybreak, uint, 0644);
  67. MODULE_PARM_DESC(rx_copybreak,
  68. "Maximum size of packet that is copied to a new buffer on receive");
  69. static unsigned int rx_flush __read_mostly = 0;
  70. module_param(rx_flush, uint, 0644);
  71. MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
  72. struct ibmveth_stat {
  73. char name[ETH_GSTRING_LEN];
  74. int offset;
  75. };
  76. #define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
  77. #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
  78. struct ibmveth_stat ibmveth_stats[] = {
  79. { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
  80. { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
  81. { "replenish_add_buff_failure",
  82. IBMVETH_STAT_OFF(replenish_add_buff_failure) },
  83. { "replenish_add_buff_success",
  84. IBMVETH_STAT_OFF(replenish_add_buff_success) },
  85. { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
  86. { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
  87. { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
  88. { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
  89. { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
  90. { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
  91. };
  92. /* simple methods of getting data from the current rxq entry */
  93. static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
  94. {
  95. return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
  96. }
  97. static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
  98. {
  99. return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
  100. IBMVETH_RXQ_TOGGLE_SHIFT;
  101. }
  102. static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
  103. {
  104. return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
  105. }
  106. static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
  107. {
  108. return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
  109. }
  110. static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
  111. {
  112. return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
  113. }
  114. static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
  115. {
  116. return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
  117. }
  118. static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
  119. {
  120. return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
  121. }
  122. /* setup the initial settings for a buffer pool */
  123. static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
  124. u32 pool_index, u32 pool_size,
  125. u32 buff_size, u32 pool_active)
  126. {
  127. pool->size = pool_size;
  128. pool->index = pool_index;
  129. pool->buff_size = buff_size;
  130. pool->threshold = pool_size * 7 / 8;
  131. pool->active = pool_active;
  132. }
  133. /* allocate and set up a buffer pool - called during open */
  134. static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
  135. {
  136. int i;
  137. pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
  138. if (!pool->free_map)
  139. return -1;
  140. pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
  141. if (!pool->dma_addr) {
  142. kfree(pool->free_map);
  143. pool->free_map = NULL;
  144. return -1;
  145. }
  146. pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
  147. if (!pool->skbuff) {
  148. kfree(pool->dma_addr);
  149. pool->dma_addr = NULL;
  150. kfree(pool->free_map);
  151. pool->free_map = NULL;
  152. return -1;
  153. }
  154. memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
  155. for (i = 0; i < pool->size; ++i)
  156. pool->free_map[i] = i;
  157. atomic_set(&pool->available, 0);
  158. pool->producer_index = 0;
  159. pool->consumer_index = 0;
  160. return 0;
  161. }
  162. static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
  163. {
  164. unsigned long offset;
  165. for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
  166. asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
  167. }
  168. /* replenish the buffers for a pool. note that we don't need to
  169. * skb_reserve these since they are used for incoming...
  170. */
  171. static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
  172. struct ibmveth_buff_pool *pool)
  173. {
  174. u32 i;
  175. u32 count = pool->size - atomic_read(&pool->available);
  176. u32 buffers_added = 0;
  177. struct sk_buff *skb;
  178. unsigned int free_index, index;
  179. u64 correlator;
  180. unsigned long lpar_rc;
  181. dma_addr_t dma_addr;
  182. mb();
  183. for (i = 0; i < count; ++i) {
  184. union ibmveth_buf_desc desc;
  185. skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
  186. if (!skb) {
  187. netdev_dbg(adapter->netdev,
  188. "replenish: unable to allocate skb\n");
  189. adapter->replenish_no_mem++;
  190. break;
  191. }
  192. free_index = pool->consumer_index;
  193. pool->consumer_index++;
  194. if (pool->consumer_index >= pool->size)
  195. pool->consumer_index = 0;
  196. index = pool->free_map[free_index];
  197. BUG_ON(index == IBM_VETH_INVALID_MAP);
  198. BUG_ON(pool->skbuff[index] != NULL);
  199. dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
  200. pool->buff_size, DMA_FROM_DEVICE);
  201. if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
  202. goto failure;
  203. pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
  204. pool->dma_addr[index] = dma_addr;
  205. pool->skbuff[index] = skb;
  206. correlator = ((u64)pool->index << 32) | index;
  207. *(u64 *)skb->data = correlator;
  208. desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
  209. desc.fields.address = dma_addr;
  210. if (rx_flush) {
  211. unsigned int len = min(pool->buff_size,
  212. adapter->netdev->mtu +
  213. IBMVETH_BUFF_OH);
  214. ibmveth_flush_buffer(skb->data, len);
  215. }
  216. lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
  217. desc.desc);
  218. if (lpar_rc != H_SUCCESS) {
  219. goto failure;
  220. } else {
  221. buffers_added++;
  222. adapter->replenish_add_buff_success++;
  223. }
  224. }
  225. mb();
  226. atomic_add(buffers_added, &(pool->available));
  227. return;
  228. failure:
  229. pool->free_map[free_index] = index;
  230. pool->skbuff[index] = NULL;
  231. if (pool->consumer_index == 0)
  232. pool->consumer_index = pool->size - 1;
  233. else
  234. pool->consumer_index--;
  235. if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
  236. dma_unmap_single(&adapter->vdev->dev,
  237. pool->dma_addr[index], pool->buff_size,
  238. DMA_FROM_DEVICE);
  239. dev_kfree_skb_any(skb);
  240. adapter->replenish_add_buff_failure++;
  241. mb();
  242. atomic_add(buffers_added, &(pool->available));
  243. }
  244. /* replenish routine */
  245. static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
  246. {
  247. int i;
  248. adapter->replenish_task_cycles++;
  249. for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
  250. struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
  251. if (pool->active &&
  252. (atomic_read(&pool->available) < pool->threshold))
  253. ibmveth_replenish_buffer_pool(adapter, pool);
  254. }
  255. adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
  256. 4096 - 8);
  257. }
  258. /* empty and free a buffer pool - also used to do cleanup in error paths */
  259. static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
  260. struct ibmveth_buff_pool *pool)
  261. {
  262. int i;
  263. kfree(pool->free_map);
  264. pool->free_map = NULL;
  265. if (pool->skbuff && pool->dma_addr) {
  266. for (i = 0; i < pool->size; ++i) {
  267. struct sk_buff *skb = pool->skbuff[i];
  268. if (skb) {
  269. dma_unmap_single(&adapter->vdev->dev,
  270. pool->dma_addr[i],
  271. pool->buff_size,
  272. DMA_FROM_DEVICE);
  273. dev_kfree_skb_any(skb);
  274. pool->skbuff[i] = NULL;
  275. }
  276. }
  277. }
  278. if (pool->dma_addr) {
  279. kfree(pool->dma_addr);
  280. pool->dma_addr = NULL;
  281. }
  282. if (pool->skbuff) {
  283. kfree(pool->skbuff);
  284. pool->skbuff = NULL;
  285. }
  286. }
  287. /* remove a buffer from a pool */
  288. static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
  289. u64 correlator)
  290. {
  291. unsigned int pool = correlator >> 32;
  292. unsigned int index = correlator & 0xffffffffUL;
  293. unsigned int free_index;
  294. struct sk_buff *skb;
  295. BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
  296. BUG_ON(index >= adapter->rx_buff_pool[pool].size);
  297. skb = adapter->rx_buff_pool[pool].skbuff[index];
  298. BUG_ON(skb == NULL);
  299. adapter->rx_buff_pool[pool].skbuff[index] = NULL;
  300. dma_unmap_single(&adapter->vdev->dev,
  301. adapter->rx_buff_pool[pool].dma_addr[index],
  302. adapter->rx_buff_pool[pool].buff_size,
  303. DMA_FROM_DEVICE);
  304. free_index = adapter->rx_buff_pool[pool].producer_index;
  305. adapter->rx_buff_pool[pool].producer_index++;
  306. if (adapter->rx_buff_pool[pool].producer_index >=
  307. adapter->rx_buff_pool[pool].size)
  308. adapter->rx_buff_pool[pool].producer_index = 0;
  309. adapter->rx_buff_pool[pool].free_map[free_index] = index;
  310. mb();
  311. atomic_dec(&(adapter->rx_buff_pool[pool].available));
  312. }
  313. /* get the current buffer on the rx queue */
  314. static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
  315. {
  316. u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
  317. unsigned int pool = correlator >> 32;
  318. unsigned int index = correlator & 0xffffffffUL;
  319. BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
  320. BUG_ON(index >= adapter->rx_buff_pool[pool].size);
  321. return adapter->rx_buff_pool[pool].skbuff[index];
  322. }
  323. /* recycle the current buffer on the rx queue */
  324. static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
  325. {
  326. u32 q_index = adapter->rx_queue.index;
  327. u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
  328. unsigned int pool = correlator >> 32;
  329. unsigned int index = correlator & 0xffffffffUL;
  330. union ibmveth_buf_desc desc;
  331. unsigned long lpar_rc;
  332. int ret = 1;
  333. BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
  334. BUG_ON(index >= adapter->rx_buff_pool[pool].size);
  335. if (!adapter->rx_buff_pool[pool].active) {
  336. ibmveth_rxq_harvest_buffer(adapter);
  337. ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
  338. goto out;
  339. }
  340. desc.fields.flags_len = IBMVETH_BUF_VALID |
  341. adapter->rx_buff_pool[pool].buff_size;
  342. desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
  343. lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
  344. if (lpar_rc != H_SUCCESS) {
  345. netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
  346. "during recycle rc=%ld", lpar_rc);
  347. ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
  348. ret = 0;
  349. }
  350. if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
  351. adapter->rx_queue.index = 0;
  352. adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
  353. }
  354. out:
  355. return ret;
  356. }
  357. static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
  358. {
  359. ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
  360. if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
  361. adapter->rx_queue.index = 0;
  362. adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
  363. }
  364. }
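/* unmap and free the buffer list, filter list, receive queue and bounce
 * buffer, and free any active receive buffer pools
 */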
  365. static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
  366. {
  367. int i;
  368. struct device *dev = &adapter->vdev->dev;
  369. if (adapter->buffer_list_addr != NULL) {
  370. if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
  371. dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
  372. DMA_BIDIRECTIONAL);
  373. adapter->buffer_list_dma = DMA_ERROR_CODE;
  374. }
  375. free_page((unsigned long)adapter->buffer_list_addr);
  376. adapter->buffer_list_addr = NULL;
  377. }
  378. if (adapter->filter_list_addr != NULL) {
  379. if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
  380. dma_unmap_single(dev, adapter->filter_list_dma, 4096,
  381. DMA_BIDIRECTIONAL);
  382. adapter->filter_list_dma = DMA_ERROR_CODE;
  383. }
  384. free_page((unsigned long)adapter->filter_list_addr);
  385. adapter->filter_list_addr = NULL;
  386. }
  387. if (adapter->rx_queue.queue_addr != NULL) {
  388. if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
  389. dma_unmap_single(dev,
  390. adapter->rx_queue.queue_dma,
  391. adapter->rx_queue.queue_len,
  392. DMA_BIDIRECTIONAL);
  393. adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
  394. }
  395. kfree(adapter->rx_queue.queue_addr);
  396. adapter->rx_queue.queue_addr = NULL;
  397. }
  398. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
  399. if (adapter->rx_buff_pool[i].active)
  400. ibmveth_free_buffer_pool(adapter,
  401. &adapter->rx_buff_pool[i]);
  402. if (adapter->bounce_buffer != NULL) {
  403. if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
  404. dma_unmap_single(&adapter->vdev->dev,
  405. adapter->bounce_buffer_dma,
  406. adapter->netdev->mtu + IBMVETH_BUFF_OH,
  407. DMA_BIDIRECTIONAL);
  408. adapter->bounce_buffer_dma = DMA_ERROR_CODE;
  409. }
  410. kfree(adapter->bounce_buffer);
  411. adapter->bounce_buffer = NULL;
  412. }
  413. }
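/* register the buffer list, filter list and receive queue with the hypervisor */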
  414. static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
  415. union ibmveth_buf_desc rxq_desc, u64 mac_address)
  416. {
  417. int rc, try_again = 1;
  418. /*
  419. * After a kexec the adapter will still be open, so our attempt to
  420. * open it will fail. If we get a failure, we free the adapter and
  421. * retry, but only once.
  422. */
  423. retry:
  424. rc = h_register_logical_lan(adapter->vdev->unit_address,
  425. adapter->buffer_list_dma, rxq_desc.desc,
  426. adapter->filter_list_dma, mac_address);
  427. if (rc != H_SUCCESS && try_again) {
  428. do {
  429. rc = h_free_logical_lan(adapter->vdev->unit_address);
  430. } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
  431. try_again = 0;
  432. goto retry;
  433. }
  434. return rc;
  435. }
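/* open the device: allocate the buffer and filter list pages and the receive
 * queue, map them for DMA, register the logical LAN with the hypervisor,
 * allocate the active buffer pools, request the irq, set up the bounce buffer
 * and kick an initial replenish cycle through the interrupt handler
 */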
  436. static int ibmveth_open(struct net_device *netdev)
  437. {
  438. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  439. u64 mac_address = 0;
  440. int rxq_entries = 1;
  441. unsigned long lpar_rc;
  442. int rc;
  443. union ibmveth_buf_desc rxq_desc;
  444. int i;
  445. struct device *dev;
  446. netdev_dbg(netdev, "open starting\n");
  447. napi_enable(&adapter->napi);
  448. for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
  449. rxq_entries += adapter->rx_buff_pool[i].size;
  450. adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
  451. adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
  452. if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
  453. netdev_err(netdev, "unable to allocate filter or buffer list "
  454. "pages\n");
  455. rc = -ENOMEM;
  456. goto err_out;
  457. }
  458. adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
  459. rxq_entries;
  460. adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
  461. GFP_KERNEL);
  462. if (!adapter->rx_queue.queue_addr) {
  463. netdev_err(netdev, "unable to allocate rx queue pages\n");
  464. rc = -ENOMEM;
  465. goto err_out;
  466. }
  467. dev = &adapter->vdev->dev;
  468. adapter->buffer_list_dma = dma_map_single(dev,
  469. adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
  470. adapter->filter_list_dma = dma_map_single(dev,
  471. adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
  472. adapter->rx_queue.queue_dma = dma_map_single(dev,
  473. adapter->rx_queue.queue_addr,
  474. adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
  475. if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
  476. (dma_mapping_error(dev, adapter->filter_list_dma)) ||
  477. (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
  478. netdev_err(netdev, "unable to map filter or buffer list "
  479. "pages\n");
  480. rc = -ENOMEM;
  481. goto err_out;
  482. }
  483. adapter->rx_queue.index = 0;
  484. adapter->rx_queue.num_slots = rxq_entries;
  485. adapter->rx_queue.toggle = 1;
  486. memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
  487. mac_address = mac_address >> 16;
  488. rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
  489. adapter->rx_queue.queue_len;
  490. rxq_desc.fields.address = adapter->rx_queue.queue_dma;
  491. netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
  492. netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
  493. netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
  494. h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
  495. lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
  496. if (lpar_rc != H_SUCCESS) {
  497. netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
  498. lpar_rc);
  499. netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
  500. "desc:0x%llx MAC:0x%llx\n",
  501. adapter->buffer_list_dma,
  502. adapter->filter_list_dma,
  503. rxq_desc.desc,
  504. mac_address);
  505. rc = -ENONET;
  506. goto err_out;
  507. }
  508. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  509. if (!adapter->rx_buff_pool[i].active)
  510. continue;
  511. if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
  512. netdev_err(netdev, "unable to alloc pool\n");
  513. adapter->rx_buff_pool[i].active = 0;
  514. rc = -ENOMEM;
  515. goto err_out;
  516. }
  517. }
  518. netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
  519. rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
  520. netdev);
  521. if (rc != 0) {
  522. netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
  523. netdev->irq, rc);
  524. do {
  525. lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
  526. } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
  527. goto err_out;
  528. }
  529. adapter->bounce_buffer =
  530. kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
  531. if (!adapter->bounce_buffer) {
  532. netdev_err(netdev, "unable to allocate bounce buffer\n");
  533. rc = -ENOMEM;
  534. goto err_out_free_irq;
  535. }
  536. adapter->bounce_buffer_dma =
  537. dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
  538. netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
  539. if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
  540. netdev_err(netdev, "unable to map bounce buffer\n");
  541. rc = -ENOMEM;
  542. goto err_out_free_irq;
  543. }
  544. netdev_dbg(netdev, "initial replenish cycle\n");
  545. ibmveth_interrupt(netdev->irq, netdev);
  546. netif_start_queue(netdev);
  547. netdev_dbg(netdev, "open complete\n");
  548. return 0;
  549. err_out_free_irq:
  550. free_irq(netdev->irq, netdev);
  551. err_out:
  552. ibmveth_cleanup(adapter);
  553. napi_disable(&adapter->napi);
  554. return rc;
  555. }
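/* close the device: stop NAPI and the tx queue, free the logical LAN in the
 * hypervisor (retrying while it reports busy), release the irq and tear
 * everything down via ibmveth_cleanup
 */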
  556. static int ibmveth_close(struct net_device *netdev)
  557. {
  558. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  559. long lpar_rc;
  560. netdev_dbg(netdev, "close starting\n");
  561. napi_disable(&adapter->napi);
  562. if (!adapter->pool_config)
  563. netif_stop_queue(netdev);
  564. h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
  565. do {
  566. lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
  567. } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
  568. if (lpar_rc != H_SUCCESS) {
  569. netdev_err(netdev, "h_free_logical_lan failed with %lx, "
  570. "continuing with close\n", lpar_rc);
  571. }
  572. free_irq(netdev->irq, netdev);
  573. adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
  574. 4096 - 8);
  575. ibmveth_cleanup(adapter);
  576. netdev_dbg(netdev, "close complete\n");
  577. return 0;
  578. }
  579. static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  580. {
  581. cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
  582. SUPPORTED_FIBRE);
  583. cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
  584. ADVERTISED_FIBRE);
  585. ethtool_cmd_speed_set(cmd, SPEED_1000);
  586. cmd->duplex = DUPLEX_FULL;
  587. cmd->port = PORT_FIBRE;
  588. cmd->phy_address = 0;
  589. cmd->transceiver = XCVR_INTERNAL;
  590. cmd->autoneg = AUTONEG_ENABLE;
  591. cmd->maxtxpkt = 0;
  592. cmd->maxrxpkt = 1;
  593. return 0;
  594. }
  595. static void netdev_get_drvinfo(struct net_device *dev,
  596. struct ethtool_drvinfo *info)
  597. {
  598. strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
  599. strncpy(info->version, ibmveth_driver_version,
  600. sizeof(info->version) - 1);
  601. }
  602. static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
  603. {
  604. /*
  605. * Since the ibmveth firmware interface does not have the
  606. * concept of separate tx/rx checksum offload enable, if rx
  607. * checksum is disabled we also have to disable tx checksum
  608. * offload. Once we disable rx checksum offload, we are no
  609. * longer allowed to send tx buffers that are not properly
  610. * checksummed.
  611. */
  612. if (!(features & NETIF_F_RXCSUM))
  613. features &= ~NETIF_F_ALL_CSUM;
  614. return features;
  615. }
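/* enable or disable IPv4/IPv6 TCP checksum offload in firmware via
 * h_illan_attributes, closing and reopening the device if it is running
 */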
  616. static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
  617. {
  618. struct ibmveth_adapter *adapter = netdev_priv(dev);
  619. unsigned long set_attr, clr_attr, ret_attr;
  620. unsigned long set_attr6, clr_attr6;
  621. long ret, ret4, ret6;
  622. int rc1 = 0, rc2 = 0;
  623. int restart = 0;
  624. if (netif_running(dev)) {
  625. restart = 1;
  626. adapter->pool_config = 1;
  627. ibmveth_close(dev);
  628. adapter->pool_config = 0;
  629. }
  630. set_attr = 0;
  631. clr_attr = 0;
  632. set_attr6 = 0;
  633. clr_attr6 = 0;
  634. if (data) {
  635. set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
  636. set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
  637. } else {
  638. clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
  639. clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
  640. }
  641. ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
  642. if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
  643. !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
  644. (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
  645. ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
  646. set_attr, &ret_attr);
  647. if (ret4 != H_SUCCESS) {
  648. netdev_err(dev, "unable to change IPv4 checksum "
  649. "offload settings. %d rc=%ld\n",
  650. data, ret4);
  651. h_illan_attributes(adapter->vdev->unit_address,
  652. set_attr, clr_attr, &ret_attr);
  653. if (data == 1)
  654. dev->features &= ~NETIF_F_IP_CSUM;
  655. } else {
  656. adapter->fw_ipv4_csum_support = data;
  657. }
  658. ret6 = h_illan_attributes(adapter->vdev->unit_address,
  659. clr_attr6, set_attr6, &ret_attr);
  660. if (ret6 != H_SUCCESS) {
  661. netdev_err(dev, "unable to change IPv6 checksum "
  662. "offload settings. %d rc=%ld\n",
  663. data, ret6);
  664. h_illan_attributes(adapter->vdev->unit_address,
  665. set_attr6, clr_attr6, &ret_attr);
  666. if (data == 1)
  667. dev->features &= ~NETIF_F_IPV6_CSUM;
  668. } else
  669. adapter->fw_ipv6_csum_support = data;
  670. if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
  671. adapter->rx_csum = data;
  672. else
  673. rc1 = -EIO;
  674. } else {
  675. rc1 = -EIO;
  676. netdev_err(dev, "unable to change checksum offload settings."
  677. " %d rc=%ld ret_attr=%lx\n", data, ret,
  678. ret_attr);
  679. }
  680. if (restart)
  681. rc2 = ibmveth_open(dev);
  682. return rc1 ? rc1 : rc2;
  683. }
  684. static int ibmveth_set_features(struct net_device *dev, u32 features)
  685. {
  686. struct ibmveth_adapter *adapter = netdev_priv(dev);
  687. int rx_csum = !!(features & NETIF_F_RXCSUM);
  688. int rc;
  689. if (rx_csum == adapter->rx_csum)
  690. return 0;
  691. rc = ibmveth_set_csum_offload(dev, rx_csum);
  692. if (rc && !adapter->rx_csum)
  693. dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
  694. return rc;
  695. }
  696. static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
  697. {
  698. int i;
  699. if (stringset != ETH_SS_STATS)
  700. return;
  701. for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
  702. memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
  703. }
  704. static int ibmveth_get_sset_count(struct net_device *dev, int sset)
  705. {
  706. switch (sset) {
  707. case ETH_SS_STATS:
  708. return ARRAY_SIZE(ibmveth_stats);
  709. default:
  710. return -EOPNOTSUPP;
  711. }
  712. }
  713. static void ibmveth_get_ethtool_stats(struct net_device *dev,
  714. struct ethtool_stats *stats, u64 *data)
  715. {
  716. int i;
  717. struct ibmveth_adapter *adapter = netdev_priv(dev);
  718. for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
  719. data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
  720. }
  721. static const struct ethtool_ops netdev_ethtool_ops = {
  722. .get_drvinfo = netdev_get_drvinfo,
  723. .get_settings = netdev_get_settings,
  724. .get_link = ethtool_op_get_link,
  725. .get_strings = ibmveth_get_strings,
  726. .get_sset_count = ibmveth_get_sset_count,
  727. .get_ethtool_stats = ibmveth_get_ethtool_stats,
  728. };
  729. static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  730. {
  731. return -EOPNOTSUPP;
  732. }
  733. #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
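/* hand up to six descriptors to h_send_logical_lan, retrying while the
 * hypervisor returns H_BUSY; returns non-zero on failure
 */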
  734. static int ibmveth_send(struct ibmveth_adapter *adapter,
  735. union ibmveth_buf_desc *descs)
  736. {
  737. unsigned long correlator;
  738. unsigned int retry_count;
  739. unsigned long ret;
  740. /*
  741. * The retry count sets a maximum for the number of broadcast and
  742. * multicast destinations within the system.
  743. */
  744. retry_count = 1024;
  745. correlator = 0;
  746. do {
  747. ret = h_send_logical_lan(adapter->vdev->unit_address,
  748. descs[0].desc, descs[1].desc,
  749. descs[2].desc, descs[3].desc,
  750. descs[4].desc, descs[5].desc,
  751. correlator, &correlator);
  752. } while ((ret == H_BUSY) && (retry_count--));
  753. if (ret != H_SUCCESS && ret != H_DROPPED) {
  754. netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
  755. "with rc=%ld\n", ret);
  756. return 1;
  757. }
  758. return 0;
  759. }
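/* transmit path: linearize skbs with more than five frags, software checksum
 * anything the hypervisor can't offload (non-TCP), copy small linear packets
 * into the pre-mapped bounce buffer, otherwise DMA map the header and
 * fragments; on a mapping failure fall back to the bounce buffer
 */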
  760. static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
  761. struct net_device *netdev)
  762. {
  763. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  764. unsigned int desc_flags;
  765. union ibmveth_buf_desc descs[6];
  766. int last, i;
  767. int force_bounce = 0;
  768. dma_addr_t dma_addr;
  769. /*
  770. * veth handles a maximum of 6 segments including the header, so
  771. * we have to linearize the skb if there are more than this.
  772. */
  773. if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
  774. netdev->stats.tx_dropped++;
  775. goto out;
  776. }
  777. /* veth can't checksum offload UDP */
  778. if (skb->ip_summed == CHECKSUM_PARTIAL &&
  779. ((skb->protocol == htons(ETH_P_IP) &&
  780. ip_hdr(skb)->protocol != IPPROTO_TCP) ||
  781. (skb->protocol == htons(ETH_P_IPV6) &&
  782. ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
  783. skb_checksum_help(skb)) {
  784. netdev_err(netdev, "tx: failed to checksum packet\n");
  785. netdev->stats.tx_dropped++;
  786. goto out;
  787. }
  788. desc_flags = IBMVETH_BUF_VALID;
  789. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  790. unsigned char *buf = skb_transport_header(skb) +
  791. skb->csum_offset;
  792. desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
  793. /* Need to zero out the checksum */
  794. buf[0] = 0;
  795. buf[1] = 0;
  796. }
  797. retry_bounce:
  798. memset(descs, 0, sizeof(descs));
  799. /*
  800. * If a linear packet is below the tx copybreak threshold then
  801. * copy it into the static bounce buffer. This avoids the
  802. * cost of a TCE insert and remove.
  803. */
  804. if (force_bounce || (!skb_is_nonlinear(skb) &&
  805. (skb->len < tx_copybreak))) {
  806. skb_copy_from_linear_data(skb, adapter->bounce_buffer,
  807. skb->len);
  808. descs[0].fields.flags_len = desc_flags | skb->len;
  809. descs[0].fields.address = adapter->bounce_buffer_dma;
  810. if (ibmveth_send(adapter, descs)) {
  811. adapter->tx_send_failed++;
  812. netdev->stats.tx_dropped++;
  813. } else {
  814. netdev->stats.tx_packets++;
  815. netdev->stats.tx_bytes += skb->len;
  816. }
  817. goto out;
  818. }
  819. /* Map the header */
  820. dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
  821. skb_headlen(skb), DMA_TO_DEVICE);
  822. if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
  823. goto map_failed;
  824. descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
  825. descs[0].fields.address = dma_addr;
  826. /* Map the frags */
  827. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  828. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  829. dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
  830. frag->page_offset, frag->size,
  831. DMA_TO_DEVICE);
  832. if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
  833. goto map_failed_frags;
  834. descs[i+1].fields.flags_len = desc_flags | frag->size;
  835. descs[i+1].fields.address = dma_addr;
  836. }
  837. if (ibmveth_send(adapter, descs)) {
  838. adapter->tx_send_failed++;
  839. netdev->stats.tx_dropped++;
  840. } else {
  841. netdev->stats.tx_packets++;
  842. netdev->stats.tx_bytes += skb->len;
  843. }
  844. dma_unmap_single(&adapter->vdev->dev,
  845. descs[0].fields.address,
  846. descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
  847. DMA_TO_DEVICE);
  848. for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
  849. dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
  850. descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
  851. DMA_TO_DEVICE);
  852. out:
  853. dev_kfree_skb(skb);
  854. return NETDEV_TX_OK;
  855. map_failed_frags:
  856. last = i+1;
  857. for (i = 0; i < last; i++)
  858. dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
  859. descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
  860. DMA_TO_DEVICE);
  861. map_failed:
  862. if (!firmware_has_feature(FW_FEATURE_CMO))
  863. netdev_err(netdev, "tx: unable to map xmit buffer\n");
  864. adapter->tx_map_failed++;
  865. skb_linearize(skb);
  866. force_bounce = 1;
  867. goto retry_bounce;
  868. }
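/* NAPI poll: drain pending receive queue entries up to the budget, copying
 * frames shorter than rx_copybreak into fresh skbs, recycling or harvesting
 * the hypervisor buffers, then replenish the pools and re-enable the virtual
 * interrupt once under budget
 */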
  869. static int ibmveth_poll(struct napi_struct *napi, int budget)
  870. {
  871. struct ibmveth_adapter *adapter =
  872. container_of(napi, struct ibmveth_adapter, napi);
  873. struct net_device *netdev = adapter->netdev;
  874. int frames_processed = 0;
  875. unsigned long lpar_rc;
  876. restart_poll:
  877. do {
  878. if (!ibmveth_rxq_pending_buffer(adapter))
  879. break;
  880. smp_rmb();
  881. if (!ibmveth_rxq_buffer_valid(adapter)) {
  882. wmb(); /* suggested by larson1 */
  883. adapter->rx_invalid_buffer++;
  884. netdev_dbg(netdev, "recycling invalid buffer\n");
  885. ibmveth_rxq_recycle_buffer(adapter);
  886. } else {
  887. struct sk_buff *skb, *new_skb;
  888. int length = ibmveth_rxq_frame_length(adapter);
  889. int offset = ibmveth_rxq_frame_offset(adapter);
  890. int csum_good = ibmveth_rxq_csum_good(adapter);
  891. skb = ibmveth_rxq_get_buffer(adapter);
  892. new_skb = NULL;
  893. if (length < rx_copybreak)
  894. new_skb = netdev_alloc_skb(netdev, length);
  895. if (new_skb) {
  896. skb_copy_to_linear_data(new_skb,
  897. skb->data + offset,
  898. length);
  899. if (rx_flush)
  900. ibmveth_flush_buffer(skb->data,
  901. length + offset);
  902. if (!ibmveth_rxq_recycle_buffer(adapter))
  903. kfree_skb(skb);
  904. skb = new_skb;
  905. } else {
  906. ibmveth_rxq_harvest_buffer(adapter);
  907. skb_reserve(skb, offset);
  908. }
  909. skb_put(skb, length);
  910. skb->protocol = eth_type_trans(skb, netdev);
  911. if (csum_good)
  912. skb->ip_summed = CHECKSUM_UNNECESSARY;
  913. netif_receive_skb(skb); /* send it up */
  914. netdev->stats.rx_packets++;
  915. netdev->stats.rx_bytes += length;
  916. frames_processed++;
  917. }
  918. } while (frames_processed < budget);
  919. ibmveth_replenish_task(adapter);
  920. if (frames_processed < budget) {
  921. /* We think we are done - reenable interrupts,
  922. * then check once more to make sure we are done.
  923. */
  924. lpar_rc = h_vio_signal(adapter->vdev->unit_address,
  925. VIO_IRQ_ENABLE);
  926. BUG_ON(lpar_rc != H_SUCCESS);
  927. napi_complete(napi);
  928. if (ibmveth_rxq_pending_buffer(adapter) &&
  929. napi_reschedule(napi)) {
  930. lpar_rc = h_vio_signal(adapter->vdev->unit_address,
  931. VIO_IRQ_DISABLE);
  932. goto restart_poll;
  933. }
  934. }
  935. return frames_processed;
  936. }
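/* interrupt handler: mask further virtual interrupts and schedule NAPI */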
  937. static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
  938. {
  939. struct net_device *netdev = dev_instance;
  940. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  941. unsigned long lpar_rc;
  942. if (napi_schedule_prep(&adapter->napi)) {
  943. lpar_rc = h_vio_signal(adapter->vdev->unit_address,
  944. VIO_IRQ_DISABLE);
  945. BUG_ON(lpar_rc != H_SUCCESS);
  946. __napi_schedule(&adapter->napi);
  947. }
  948. return IRQ_HANDLED;
  949. }
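/* program the hypervisor multicast filter; if the device is promiscuous or
 * the address list exceeds mcastFilterSize, disable filtering instead
 */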
  950. static void ibmveth_set_multicast_list(struct net_device *netdev)
  951. {
  952. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  953. unsigned long lpar_rc;
  954. if ((netdev->flags & IFF_PROMISC) ||
  955. (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
  956. lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
  957. IbmVethMcastEnableRecv |
  958. IbmVethMcastDisableFiltering,
  959. 0);
  960. if (lpar_rc != H_SUCCESS) {
  961. netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
  962. "entering promisc mode\n", lpar_rc);
  963. }
  964. } else {
  965. struct netdev_hw_addr *ha;
  966. /* clear the filter table & disable filtering */
  967. lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
  968. IbmVethMcastEnableRecv |
  969. IbmVethMcastDisableFiltering |
  970. IbmVethMcastClearFilterTable,
  971. 0);
  972. if (lpar_rc != H_SUCCESS) {
  973. netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
  974. "attempting to clear filter table\n",
  975. lpar_rc);
  976. }
  977. /* add the addresses to the filter table */
  978. netdev_for_each_mc_addr(ha, netdev) {
  979. /* add the multicast address to the filter table */
  980. unsigned long mcast_addr = 0;
  981. memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
  982. lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
  983. IbmVethMcastAddFilter,
  984. mcast_addr);
  985. if (lpar_rc != H_SUCCESS) {
  986. netdev_err(netdev, "h_multicast_ctrl rc=%ld "
  987. "when adding an entry to the filter "
  988. "table\n", lpar_rc);
  989. }
  990. }
  991. /* re-enable filtering */
  992. lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
  993. IbmVethMcastEnableFiltering,
  994. 0);
  995. if (lpar_rc != H_SUCCESS) {
  996. netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
  997. "enabling filtering\n", lpar_rc);
  998. }
  999. }
  1000. }
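/* change the MTU: reject sizes no pool can hold, close a running device,
 * activate pools until one is large enough for the new size, update the CMO
 * desired DMA entitlement and reopen
 */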
  1001. static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
  1002. {
  1003. struct ibmveth_adapter *adapter = netdev_priv(dev);
  1004. struct vio_dev *viodev = adapter->vdev;
  1005. int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
  1006. int i, rc;
  1007. int need_restart = 0;
  1008. if (new_mtu < IBMVETH_MIN_MTU)
  1009. return -EINVAL;
  1010. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
  1011. if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
  1012. break;
  1013. if (i == IBMVETH_NUM_BUFF_POOLS)
  1014. return -EINVAL;
  1015. /* Deactivate all the buffer pools so that the next loop can activate
  1016. only the buffer pools necessary to hold the new MTU */
  1017. if (netif_running(adapter->netdev)) {
  1018. need_restart = 1;
  1019. adapter->pool_config = 1;
  1020. ibmveth_close(adapter->netdev);
  1021. adapter->pool_config = 0;
  1022. }
  1023. /* Look for an active buffer pool that can hold the new MTU */
  1024. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  1025. adapter->rx_buff_pool[i].active = 1;
  1026. if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
  1027. dev->mtu = new_mtu;
  1028. vio_cmo_set_dev_desired(viodev,
  1029. ibmveth_get_desired_dma
  1030. (viodev));
  1031. if (need_restart) {
  1032. return ibmveth_open(adapter->netdev);
  1033. }
  1034. return 0;
  1035. }
  1036. }
  1037. if (need_restart && (rc = ibmveth_open(adapter->netdev)))
  1038. return rc;
  1039. return -EINVAL;
  1040. }
  1041. #ifdef CONFIG_NET_POLL_CONTROLLER
  1042. static void ibmveth_poll_controller(struct net_device *dev)
  1043. {
  1044. ibmveth_replenish_task(netdev_priv(dev));
  1045. ibmveth_interrupt(dev->irq, dev);
  1046. }
  1047. #endif
  1048. /**
  1049. * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
  1050. *
  1051. * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
  1052. *
  1053. * Return value:
  1054. * Number of bytes of IO data the driver will need to perform well.
  1055. */
  1056. static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
  1057. {
  1058. struct net_device *netdev = dev_get_drvdata(&vdev->dev);
  1059. struct ibmveth_adapter *adapter;
  1060. unsigned long ret;
  1061. int i;
  1062. int rxqentries = 1;
  1063. /* netdev inits at probe time along with the structures we need below */
  1064. if (netdev == NULL)
  1065. return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
  1066. adapter = netdev_priv(netdev);
  1067. ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
  1068. ret += IOMMU_PAGE_ALIGN(netdev->mtu);
  1069. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  1070. /* add the size of the active receive buffers */
  1071. if (adapter->rx_buff_pool[i].active)
  1072. ret +=
  1073. adapter->rx_buff_pool[i].size *
  1074. IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
  1075. buff_size);
  1076. rxqentries += adapter->rx_buff_pool[i].size;
  1077. }
  1078. /* add the size of the receive queue entries */
  1079. ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
  1080. return ret;
  1081. }
  1082. static const struct net_device_ops ibmveth_netdev_ops = {
  1083. .ndo_open = ibmveth_open,
  1084. .ndo_stop = ibmveth_close,
  1085. .ndo_start_xmit = ibmveth_start_xmit,
  1086. .ndo_set_multicast_list = ibmveth_set_multicast_list,
  1087. .ndo_do_ioctl = ibmveth_ioctl,
  1088. .ndo_change_mtu = ibmveth_change_mtu,
  1089. .ndo_fix_features = ibmveth_fix_features,
  1090. .ndo_set_features = ibmveth_set_features,
  1091. .ndo_validate_addr = eth_validate_addr,
  1092. .ndo_set_mac_address = eth_mac_addr,
  1093. #ifdef CONFIG_NET_POLL_CONTROLLER
  1094. .ndo_poll_controller = ibmveth_poll_controller,
  1095. #endif
  1096. };
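/* probe: read the MAC address and multicast filter size from the device tree,
 * allocate and initialise the netdev, set up the receive buffer pools and
 * their sysfs kobjects, then register the netdev
 */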
  1097. static int __devinit ibmveth_probe(struct vio_dev *dev,
  1098. const struct vio_device_id *id)
  1099. {
  1100. int rc, i;
  1101. struct net_device *netdev;
  1102. struct ibmveth_adapter *adapter;
  1103. unsigned char *mac_addr_p;
  1104. unsigned int *mcastFilterSize_p;
  1105. dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
  1106. dev->unit_address);
  1107. mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
  1108. NULL);
  1109. if (!mac_addr_p) {
  1110. dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
  1111. return -EINVAL;
  1112. }
  1113. mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
  1114. VETH_MCAST_FILTER_SIZE, NULL);
  1115. if (!mcastFilterSize_p) {
  1116. dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
  1117. "attribute\n");
  1118. return -EINVAL;
  1119. }
  1120. netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
  1121. if (!netdev)
  1122. return -ENOMEM;
  1123. adapter = netdev_priv(netdev);
  1124. dev_set_drvdata(&dev->dev, netdev);
  1125. adapter->vdev = dev;
  1126. adapter->netdev = netdev;
  1127. adapter->mcastFilterSize = *mcastFilterSize_p;
  1128. adapter->pool_config = 0;
  1129. netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
  1130. /*
  1131. * Some older boxes running PHYP non-natively have an OF that returns
  1132. * a 8-byte local-mac-address field (and the first 2 bytes have to be
  1133. * ignored) while newer boxes' OF return a 6-byte field. Note that
  1134. * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
  1135. * The RPA doc specifies that the first byte must be 10b, so we'll
  1136. * just look for it to solve this 8 vs. 6 byte field issue
  1137. */
  1138. if ((*mac_addr_p & 0x3) != 0x02)
  1139. mac_addr_p += 2;
  1140. adapter->mac_addr = 0;
  1141. memcpy(&adapter->mac_addr, mac_addr_p, 6);
  1142. netdev->irq = dev->irq;
  1143. netdev->netdev_ops = &ibmveth_netdev_ops;
  1144. netdev->ethtool_ops = &netdev_ethtool_ops;
  1145. SET_NETDEV_DEV(netdev, &dev->dev);
  1146. netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
  1147. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
  1148. netdev->features |= netdev->hw_features;
  1149. memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
  1150. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  1151. struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
  1152. int error;
  1153. ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
  1154. pool_count[i], pool_size[i],
  1155. pool_active[i]);
  1156. error = kobject_init_and_add(kobj, &ktype_veth_pool,
  1157. &dev->dev.kobj, "pool%d", i);
  1158. if (!error)
  1159. kobject_uevent(kobj, KOBJ_ADD);
  1160. }
  1161. netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
  1162. adapter->buffer_list_dma = DMA_ERROR_CODE;
  1163. adapter->filter_list_dma = DMA_ERROR_CODE;
  1164. adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
  1165. netdev_dbg(netdev, "registering netdev...\n");
  1166. ibmveth_set_features(netdev, netdev->features);
  1167. rc = register_netdev(netdev);
  1168. if (rc) {
  1169. netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
  1170. free_netdev(netdev);
  1171. return rc;
  1172. }
  1173. netdev_dbg(netdev, "registered\n");
  1174. return 0;
  1175. }
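/* remove: drop the per-pool sysfs kobjects, then unregister and free the netdev */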
  1176. static int __devexit ibmveth_remove(struct vio_dev *dev)
  1177. {
  1178. struct net_device *netdev = dev_get_drvdata(&dev->dev);
  1179. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  1180. int i;
  1181. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
  1182. kobject_put(&adapter->rx_buff_pool[i].kobj);
  1183. unregister_netdev(netdev);
  1184. free_netdev(netdev);
  1185. dev_set_drvdata(&dev->dev, NULL);
  1186. return 0;
  1187. }
  1188. static struct attribute veth_active_attr;
  1189. static struct attribute veth_num_attr;
  1190. static struct attribute veth_size_attr;
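/* sysfs show/store for the per-pool active, num and size attributes; store
 * closes and reopens a running device so the new pool geometry takes effect
 */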
  1191. static ssize_t veth_pool_show(struct kobject *kobj,
  1192. struct attribute *attr, char *buf)
  1193. {
  1194. struct ibmveth_buff_pool *pool = container_of(kobj,
  1195. struct ibmveth_buff_pool,
  1196. kobj);
  1197. if (attr == &veth_active_attr)
  1198. return sprintf(buf, "%d\n", pool->active);
  1199. else if (attr == &veth_num_attr)
  1200. return sprintf(buf, "%d\n", pool->size);
  1201. else if (attr == &veth_size_attr)
  1202. return sprintf(buf, "%d\n", pool->buff_size);
  1203. return 0;
  1204. }
  1205. static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
  1206. const char *buf, size_t count)
  1207. {
  1208. struct ibmveth_buff_pool *pool = container_of(kobj,
  1209. struct ibmveth_buff_pool,
  1210. kobj);
  1211. struct net_device *netdev = dev_get_drvdata(
  1212. container_of(kobj->parent, struct device, kobj));
  1213. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  1214. long value = simple_strtol(buf, NULL, 10);
  1215. long rc;
  1216. if (attr == &veth_active_attr) {
  1217. if (value && !pool->active) {
  1218. if (netif_running(netdev)) {
  1219. if (ibmveth_alloc_buffer_pool(pool)) {
  1220. netdev_err(netdev,
  1221. "unable to alloc pool\n");
  1222. return -ENOMEM;
  1223. }
  1224. pool->active = 1;
  1225. adapter->pool_config = 1;
  1226. ibmveth_close(netdev);
  1227. adapter->pool_config = 0;
  1228. if ((rc = ibmveth_open(netdev)))
  1229. return rc;
  1230. } else {
  1231. pool->active = 1;
  1232. }
  1233. } else if (!value && pool->active) {
  1234. int mtu = netdev->mtu + IBMVETH_BUFF_OH;
  1235. int i;
  1236. /* Make sure there is a buffer pool with buffers that
  1237. can hold a packet as large as the MTU */
  1238. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  1239. if (pool == &adapter->rx_buff_pool[i])
  1240. continue;
  1241. if (!adapter->rx_buff_pool[i].active)
  1242. continue;
  1243. if (mtu <= adapter->rx_buff_pool[i].buff_size)
  1244. break;
  1245. }
  1246. if (i == IBMVETH_NUM_BUFF_POOLS) {
  1247. netdev_err(netdev, "no active pool >= MTU\n");
  1248. return -EPERM;
  1249. }
  1250. if (netif_running(netdev)) {
  1251. adapter->pool_config = 1;
  1252. ibmveth_close(netdev);
  1253. pool->active = 0;
  1254. adapter->pool_config = 0;
  1255. if ((rc = ibmveth_open(netdev)))
  1256. return rc;
  1257. }
  1258. pool->active = 0;
  1259. }
  1260. } else if (attr == &veth_num_attr) {
  1261. if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
  1262. return -EINVAL;
  1263. } else {
  1264. if (netif_running(netdev)) {
  1265. adapter->pool_config = 1;
  1266. ibmveth_close(netdev);
  1267. adapter->pool_config = 0;
  1268. pool->size = value;
  1269. if ((rc = ibmveth_open(netdev)))
  1270. return rc;
  1271. } else {
  1272. pool->size = value;
  1273. }
  1274. }
  1275. } else if (attr == &veth_size_attr) {
  1276. if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
  1277. return -EINVAL;
  1278. } else {
  1279. if (netif_running(netdev)) {
  1280. adapter->pool_config = 1;
  1281. ibmveth_close(netdev);
  1282. adapter->pool_config = 0;
  1283. pool->buff_size = value;
  1284. if ((rc = ibmveth_open(netdev)))
  1285. return rc;
  1286. } else {
  1287. pool->buff_size = value;
  1288. }
  1289. }
  1290. }
  1291. /* kick the interrupt handler to allocate/deallocate pools */
  1292. ibmveth_interrupt(netdev->irq, netdev);
  1293. return count;
  1294. }
  1295. #define ATTR(_name, _mode) \
  1296. struct attribute veth_##_name##_attr = { \
  1297. .name = __stringify(_name), .mode = _mode, \
  1298. };
  1299. static ATTR(active, 0644);
  1300. static ATTR(num, 0644);
  1301. static ATTR(size, 0644);
  1302. static struct attribute *veth_pool_attrs[] = {
  1303. &veth_active_attr,
  1304. &veth_num_attr,
  1305. &veth_size_attr,
  1306. NULL,
  1307. };
  1308. static const struct sysfs_ops veth_pool_ops = {
  1309. .show = veth_pool_show,
  1310. .store = veth_pool_store,
  1311. };
  1312. static struct kobj_type ktype_veth_pool = {
  1313. .release = NULL,
  1314. .sysfs_ops = &veth_pool_ops,
  1315. .default_attrs = veth_pool_attrs,
  1316. };
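/* power management resume: poke the interrupt handler to restart NAPI and
 * buffer replenishment
 */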
  1317. static int ibmveth_resume(struct device *dev)
  1318. {
  1319. struct net_device *netdev = dev_get_drvdata(dev);
  1320. ibmveth_interrupt(netdev->irq, netdev);
  1321. return 0;
  1322. }
  1323. static struct vio_device_id ibmveth_device_table[] __devinitdata = {
  1324. { "network", "IBM,l-lan"},
  1325. { "", "" }
  1326. };
  1327. MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
  1328. static struct dev_pm_ops ibmveth_pm_ops = {
  1329. .resume = ibmveth_resume
  1330. };
  1331. static struct vio_driver ibmveth_driver = {
  1332. .id_table = ibmveth_device_table,
  1333. .probe = ibmveth_probe,
  1334. .remove = ibmveth_remove,
  1335. .get_desired_dma = ibmveth_get_desired_dma,
  1336. .driver = {
  1337. .name = ibmveth_driver_name,
  1338. .owner = THIS_MODULE,
  1339. .pm = &ibmveth_pm_ops,
  1340. }
  1341. };
  1342. static int __init ibmveth_module_init(void)
  1343. {
  1344. printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
  1345. ibmveth_driver_string, ibmveth_driver_version);
  1346. return vio_register_driver(&ibmveth_driver);
  1347. }
  1348. static void __exit ibmveth_module_exit(void)
  1349. {
  1350. vio_unregister_driver(&ibmveth_driver);
  1351. }
  1352. module_init(ibmveth_module_init);
  1353. module_exit(ibmveth_module_exit);