ibmveth.c

  1. /*
  2. * IBM Power Virtual Ethernet Device Driver
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) IBM Corporation, 2003, 2010
  19. *
  20. * Authors: Dave Larson <larson1@us.ibm.com>
  21. * Santiago Leon <santil@linux.vnet.ibm.com>
  22. * Brian King <brking@linux.vnet.ibm.com>
  23. * Robert Jennings <rcj@linux.vnet.ibm.com>
  24. * Anton Blanchard <anton@au.ibm.com>
  25. */
  26. #include <linux/module.h>
  27. #include <linux/moduleparam.h>
  28. #include <linux/types.h>
  29. #include <linux/errno.h>
  30. #include <linux/dma-mapping.h>
  31. #include <linux/kernel.h>
  32. #include <linux/netdevice.h>
  33. #include <linux/etherdevice.h>
  34. #include <linux/skbuff.h>
  35. #include <linux/init.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/mm.h>
  38. #include <linux/pm.h>
  39. #include <linux/ethtool.h>
  40. #include <linux/in.h>
  41. #include <linux/ip.h>
  42. #include <linux/ipv6.h>
  43. #include <linux/slab.h>
  44. #include <asm/hvcall.h>
  45. #include <linux/atomic.h>
  46. #include <asm/vio.h>
  47. #include <asm/iommu.h>
  48. #include <asm/firmware.h>
  49. #include "ibmveth.h"
  50. static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
  51. static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
  52. static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
  53. static struct kobj_type ktype_veth_pool;
  54. static const char ibmveth_driver_name[] = "ibmveth";
  55. static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
  56. #define ibmveth_driver_version "1.04"
  57. MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
  58. MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
  59. MODULE_LICENSE("GPL");
  60. MODULE_VERSION(ibmveth_driver_version);
  61. static unsigned int tx_copybreak __read_mostly = 128;
  62. module_param(tx_copybreak, uint, 0644);
  63. MODULE_PARM_DESC(tx_copybreak,
  64. "Maximum size of packet that is copied to a new buffer on transmit");
  65. static unsigned int rx_copybreak __read_mostly = 128;
  66. module_param(rx_copybreak, uint, 0644);
  67. MODULE_PARM_DESC(rx_copybreak,
  68. "Maximum size of packet that is copied to a new buffer on receive");
  69. static unsigned int rx_flush __read_mostly = 0;
  70. module_param(rx_flush, uint, 0644);
  71. MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
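/*
 * The copybreak values are copy thresholds rather than size limits: linear
 * transmit packets shorter than tx_copybreak are copied into the pre-mapped
 * bounce buffer instead of being DMA-mapped per packet, and received frames
 * shorter than rx_copybreak are copied into a freshly allocated skb so the
 * original pool buffer can be recycled immediately (see ibmveth_start_xmit
 * and ibmveth_poll below).
 */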
  72. struct ibmveth_stat {
  73. char name[ETH_GSTRING_LEN];
  74. int offset;
  75. };
  76. #define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
  77. #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
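/*
 * Each entry in the ibmveth_stats table below records the byte offset of a
 * u64 counter inside struct ibmveth_adapter; IBMVETH_GET_STAT reads the
 * counter at that offset so the ethtool stats code can walk the table
 * generically.
 */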
  78. struct ibmveth_stat ibmveth_stats[] = {
  79. { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
  80. { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
  81. { "replenish_add_buff_failure",
  82. IBMVETH_STAT_OFF(replenish_add_buff_failure) },
  83. { "replenish_add_buff_success",
  84. IBMVETH_STAT_OFF(replenish_add_buff_success) },
  85. { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
  86. { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
  87. { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
  88. { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
  89. { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
  90. { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
  91. };
  92. /* simple methods of getting data from the current rxq entry */
  93. static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
  94. {
  95. return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
  96. }
  97. static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
  98. {
  99. return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
  100. IBMVETH_RXQ_TOGGLE_SHIFT;
  101. }
  102. static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
  103. {
  104. return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
  105. }
  106. static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
  107. {
  108. return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
  109. }
  110. static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
  111. {
  112. return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
  113. }
  114. static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
  115. {
  116. return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
  117. }
  118. static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
  119. {
  120. return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
  121. }
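/*
 * The flags_off word of an rx queue entry packs a toggle bit, a valid bit,
 * a checksum-good bit and the frame offset. The driver flips its own
 * rx_queue.toggle each time the queue index wraps, so an entry whose toggle
 * matches rx_queue.toggle is one that has been filled since we last passed
 * this slot.
 */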
  122. /* set up the initial settings for a buffer pool */
  123. static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
  124. u32 pool_index, u32 pool_size,
  125. u32 buff_size, u32 pool_active)
  126. {
  127. pool->size = pool_size;
  128. pool->index = pool_index;
  129. pool->buff_size = buff_size;
  130. pool->threshold = pool_size * 7 / 8;
  131. pool->active = pool_active;
  132. }
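/*
 * The threshold is 7/8 of the pool size: e.g. a pool of 512 buffers is
 * replenished once fewer than 448 of them are available to the hypervisor.
 */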
  133. /* allocate and set up a buffer pool - called during open */
  134. static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
  135. {
  136. int i;
  137. pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
  138. if (!pool->free_map)
  139. return -1;
  140. pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
  141. if (!pool->dma_addr) {
  142. kfree(pool->free_map);
  143. pool->free_map = NULL;
  144. return -1;
  145. }
  146. pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
  147. if (!pool->skbuff) {
  148. kfree(pool->dma_addr);
  149. pool->dma_addr = NULL;
  150. kfree(pool->free_map);
  151. pool->free_map = NULL;
  152. return -1;
  153. }
  154. memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
  155. for (i = 0; i < pool->size; ++i)
  156. pool->free_map[i] = i;
  157. atomic_set(&pool->available, 0);
  158. pool->producer_index = 0;
  159. pool->consumer_index = 0;
  160. return 0;
  161. }
  162. static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
  163. {
  164. unsigned long offset;
  165. for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
  166. asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
  167. }
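/*
 * dcbfl flushes one data-cache block per iteration, so the loop walks the
 * buffer in SMP_CACHE_BYTES strides. It is only used when the rx_flush
 * module parameter is set.
 */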
  168. /* replenish the buffers for a pool. note that we don't need to
  169. * skb_reserve these since they are used for incoming...
  170. */
  171. static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
  172. struct ibmveth_buff_pool *pool)
  173. {
  174. u32 i;
  175. u32 count = pool->size - atomic_read(&pool->available);
  176. u32 buffers_added = 0;
  177. struct sk_buff *skb;
  178. unsigned int free_index, index;
  179. u64 correlator;
  180. unsigned long lpar_rc;
  181. dma_addr_t dma_addr;
  182. mb();
  183. for (i = 0; i < count; ++i) {
  184. union ibmveth_buf_desc desc;
  185. skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
  186. if (!skb) {
  187. netdev_dbg(adapter->netdev,
  188. "replenish: unable to allocate skb\n");
  189. adapter->replenish_no_mem++;
  190. break;
  191. }
  192. free_index = pool->consumer_index;
  193. pool->consumer_index++;
  194. if (pool->consumer_index >= pool->size)
  195. pool->consumer_index = 0;
  196. index = pool->free_map[free_index];
  197. BUG_ON(index == IBM_VETH_INVALID_MAP);
  198. BUG_ON(pool->skbuff[index] != NULL);
  199. dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
  200. pool->buff_size, DMA_FROM_DEVICE);
  201. if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
  202. goto failure;
  203. pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
  204. pool->dma_addr[index] = dma_addr;
  205. pool->skbuff[index] = skb;
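/*
 * The correlator packs the pool number in the upper 32 bits and the buffer
 * index in the lower 32 bits. It is written to the start of the buffer and
 * comes back in the rx queue entry, which is how
 * ibmveth_remove_buffer_from_pool() finds the matching skb later.
 */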
  206. correlator = ((u64)pool->index << 32) | index;
  207. *(u64 *)skb->data = correlator;
  208. desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
  209. desc.fields.address = dma_addr;
  210. if (rx_flush) {
  211. unsigned int len = min(pool->buff_size,
  212. adapter->netdev->mtu +
  213. IBMVETH_BUFF_OH);
  214. ibmveth_flush_buffer(skb->data, len);
  215. }
  216. lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
  217. desc.desc);
  218. if (lpar_rc != H_SUCCESS) {
  219. goto failure;
  220. } else {
  221. buffers_added++;
  222. adapter->replenish_add_buff_success++;
  223. }
  224. }
  225. mb();
  226. atomic_add(buffers_added, &(pool->available));
  227. return;
  228. failure:
  229. pool->free_map[free_index] = index;
  230. pool->skbuff[index] = NULL;
  231. if (pool->consumer_index == 0)
  232. pool->consumer_index = pool->size - 1;
  233. else
  234. pool->consumer_index--;
  235. if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
  236. dma_unmap_single(&adapter->vdev->dev,
  237. pool->dma_addr[index], pool->buff_size,
  238. DMA_FROM_DEVICE);
  239. dev_kfree_skb_any(skb);
  240. adapter->replenish_add_buff_failure++;
  241. mb();
  242. atomic_add(buffers_added, &(pool->available));
  243. }
  244. /* replenish routine */
  245. static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
  246. {
  247. int i;
  248. adapter->replenish_task_cycles++;
  249. for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
  250. struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
  251. if (pool->active &&
  252. (atomic_read(&pool->available) < pool->threshold))
  253. ibmveth_replenish_buffer_pool(adapter, pool);
  254. }
  255. adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
  256. 4096 - 8);
  257. }
  258. /* empty and free a buffer pool - also used to do cleanup in error paths */
  259. static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
  260. struct ibmveth_buff_pool *pool)
  261. {
  262. int i;
  263. kfree(pool->free_map);
  264. pool->free_map = NULL;
  265. if (pool->skbuff && pool->dma_addr) {
  266. for (i = 0; i < pool->size; ++i) {
  267. struct sk_buff *skb = pool->skbuff[i];
  268. if (skb) {
  269. dma_unmap_single(&adapter->vdev->dev,
  270. pool->dma_addr[i],
  271. pool->buff_size,
  272. DMA_FROM_DEVICE);
  273. dev_kfree_skb_any(skb);
  274. pool->skbuff[i] = NULL;
  275. }
  276. }
  277. }
  278. if (pool->dma_addr) {
  279. kfree(pool->dma_addr);
  280. pool->dma_addr = NULL;
  281. }
  282. if (pool->skbuff) {
  283. kfree(pool->skbuff);
  284. pool->skbuff = NULL;
  285. }
  286. }
  287. /* remove a buffer from a pool */
  288. static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
  289. u64 correlator)
  290. {
  291. unsigned int pool = correlator >> 32;
  292. unsigned int index = correlator & 0xffffffffUL;
  293. unsigned int free_index;
  294. struct sk_buff *skb;
  295. BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
  296. BUG_ON(index >= adapter->rx_buff_pool[pool].size);
  297. skb = adapter->rx_buff_pool[pool].skbuff[index];
  298. BUG_ON(skb == NULL);
  299. adapter->rx_buff_pool[pool].skbuff[index] = NULL;
  300. dma_unmap_single(&adapter->vdev->dev,
  301. adapter->rx_buff_pool[pool].dma_addr[index],
  302. adapter->rx_buff_pool[pool].buff_size,
  303. DMA_FROM_DEVICE);
  304. free_index = adapter->rx_buff_pool[pool].producer_index;
  305. adapter->rx_buff_pool[pool].producer_index++;
  306. if (adapter->rx_buff_pool[pool].producer_index >=
  307. adapter->rx_buff_pool[pool].size)
  308. adapter->rx_buff_pool[pool].producer_index = 0;
  309. adapter->rx_buff_pool[pool].free_map[free_index] = index;
  310. mb();
  311. atomic_dec(&(adapter->rx_buff_pool[pool].available));
  312. }
  313. /* get the current buffer on the rx queue */
  314. static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
  315. {
  316. u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
  317. unsigned int pool = correlator >> 32;
  318. unsigned int index = correlator & 0xffffffffUL;
  319. BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
  320. BUG_ON(index >= adapter->rx_buff_pool[pool].size);
  321. return adapter->rx_buff_pool[pool].skbuff[index];
  322. }
  323. /* recycle the current buffer on the rx queue */
  324. static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
  325. {
  326. u32 q_index = adapter->rx_queue.index;
  327. u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
  328. unsigned int pool = correlator >> 32;
  329. unsigned int index = correlator & 0xffffffffUL;
  330. union ibmveth_buf_desc desc;
  331. unsigned long lpar_rc;
  332. BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
  333. BUG_ON(index >= adapter->rx_buff_pool[pool].size);
  334. if (!adapter->rx_buff_pool[pool].active) {
  335. ibmveth_rxq_harvest_buffer(adapter);
  336. ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
  337. return;
  338. }
  339. desc.fields.flags_len = IBMVETH_BUF_VALID |
  340. adapter->rx_buff_pool[pool].buff_size;
  341. desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
  342. lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
  343. if (lpar_rc != H_SUCCESS) {
  344. netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
  345. "during recycle rc=%ld", lpar_rc);
  346. ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
  347. }
  348. if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
  349. adapter->rx_queue.index = 0;
  350. adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
  351. }
  352. }
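/*
 * Harvesting differs from recycling: the buffer is unmapped and its slot
 * returned to the pool's free map instead of being handed straight back to
 * the hypervisor. The poll loop harvests when it keeps the skb for the
 * stack (or when the pool has been deactivated) and recycles otherwise.
 */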
  353. static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
  354. {
  355. ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
  356. if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
  357. adapter->rx_queue.index = 0;
  358. adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
  359. }
  360. }
  361. static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
  362. {
  363. int i;
  364. struct device *dev = &adapter->vdev->dev;
  365. if (adapter->buffer_list_addr != NULL) {
  366. if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
  367. dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
  368. DMA_BIDIRECTIONAL);
  369. adapter->buffer_list_dma = DMA_ERROR_CODE;
  370. }
  371. free_page((unsigned long)adapter->buffer_list_addr);
  372. adapter->buffer_list_addr = NULL;
  373. }
  374. if (adapter->filter_list_addr != NULL) {
  375. if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
  376. dma_unmap_single(dev, adapter->filter_list_dma, 4096,
  377. DMA_BIDIRECTIONAL);
  378. adapter->filter_list_dma = DMA_ERROR_CODE;
  379. }
  380. free_page((unsigned long)adapter->filter_list_addr);
  381. adapter->filter_list_addr = NULL;
  382. }
  383. if (adapter->rx_queue.queue_addr != NULL) {
  384. if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
  385. dma_unmap_single(dev,
  386. adapter->rx_queue.queue_dma,
  387. adapter->rx_queue.queue_len,
  388. DMA_BIDIRECTIONAL);
  389. adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
  390. }
  391. kfree(adapter->rx_queue.queue_addr);
  392. adapter->rx_queue.queue_addr = NULL;
  393. }
  394. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
  395. if (adapter->rx_buff_pool[i].active)
  396. ibmveth_free_buffer_pool(adapter,
  397. &adapter->rx_buff_pool[i]);
  398. if (adapter->bounce_buffer != NULL) {
  399. if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
  400. dma_unmap_single(&adapter->vdev->dev,
  401. adapter->bounce_buffer_dma,
  402. adapter->netdev->mtu + IBMVETH_BUFF_OH,
  403. DMA_BIDIRECTIONAL);
  404. adapter->bounce_buffer_dma = DMA_ERROR_CODE;
  405. }
  406. kfree(adapter->bounce_buffer);
  407. adapter->bounce_buffer = NULL;
  408. }
  409. }
  410. static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
  411. union ibmveth_buf_desc rxq_desc, u64 mac_address)
  412. {
  413. int rc, try_again = 1;
  414. /*
  415. * After a kexec the adapter will still be open, so our attempt to
  416. * open it will fail. So if we get a failure we free the adapter and
  417. * try again, but only once.
  418. */
  419. retry:
  420. rc = h_register_logical_lan(adapter->vdev->unit_address,
  421. adapter->buffer_list_dma, rxq_desc.desc,
  422. adapter->filter_list_dma, mac_address);
  423. if (rc != H_SUCCESS && try_again) {
  424. do {
  425. rc = h_free_logical_lan(adapter->vdev->unit_address);
  426. } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
  427. try_again = 0;
  428. goto retry;
  429. }
  430. return rc;
  431. }
  432. static int ibmveth_open(struct net_device *netdev)
  433. {
  434. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  435. u64 mac_address = 0;
  436. int rxq_entries = 1;
  437. unsigned long lpar_rc;
  438. int rc;
  439. union ibmveth_buf_desc rxq_desc;
  440. int i;
  441. struct device *dev;
  442. netdev_dbg(netdev, "open starting\n");
  443. napi_enable(&adapter->napi);
  444. for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
  445. rxq_entries += adapter->rx_buff_pool[i].size;
  446. adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
  447. adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
  448. if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
  449. netdev_err(netdev, "unable to allocate filter or buffer list "
  450. "pages\n");
  451. rc = -ENOMEM;
  452. goto err_out;
  453. }
  454. adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
  455. rxq_entries;
  456. adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
  457. GFP_KERNEL);
  458. if (!adapter->rx_queue.queue_addr) {
  459. netdev_err(netdev, "unable to allocate rx queue pages\n");
  460. rc = -ENOMEM;
  461. goto err_out;
  462. }
  463. dev = &adapter->vdev->dev;
  464. adapter->buffer_list_dma = dma_map_single(dev,
  465. adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
  466. adapter->filter_list_dma = dma_map_single(dev,
  467. adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
  468. adapter->rx_queue.queue_dma = dma_map_single(dev,
  469. adapter->rx_queue.queue_addr,
  470. adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
  471. if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
  472. (dma_mapping_error(dev, adapter->filter_list_dma)) ||
  473. (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
  474. netdev_err(netdev, "unable to map filter or buffer list "
  475. "pages\n");
  476. rc = -ENOMEM;
  477. goto err_out;
  478. }
  479. adapter->rx_queue.index = 0;
  480. adapter->rx_queue.num_slots = rxq_entries;
  481. adapter->rx_queue.toggle = 1;
  482. memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
  483. mac_address = mac_address >> 16;
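/*
 * On big-endian Power the six MAC bytes copied above land in the high-order
 * bytes of the u64, so shifting right by 16 leaves the address
 * right-justified in the low 48 bits, presumably the form
 * h_register_logical_lan expects.
 */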
  484. rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
  485. adapter->rx_queue.queue_len;
  486. rxq_desc.fields.address = adapter->rx_queue.queue_dma;
  487. netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
  488. netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
  489. netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
  490. h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
  491. lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
  492. if (lpar_rc != H_SUCCESS) {
  493. netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
  494. lpar_rc);
  495. netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
  496. "desc:0x%llx MAC:0x%llx\n",
  497. adapter->buffer_list_dma,
  498. adapter->filter_list_dma,
  499. rxq_desc.desc,
  500. mac_address);
  501. rc = -ENONET;
  502. goto err_out;
  503. }
  504. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  505. if (!adapter->rx_buff_pool[i].active)
  506. continue;
  507. if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
  508. netdev_err(netdev, "unable to alloc pool\n");
  509. adapter->rx_buff_pool[i].active = 0;
  510. rc = -ENOMEM;
  511. goto err_out;
  512. }
  513. }
  514. netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
  515. rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
  516. netdev);
  517. if (rc != 0) {
  518. netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
  519. netdev->irq, rc);
  520. do {
  521. rc = h_free_logical_lan(adapter->vdev->unit_address);
  522. } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
  523. goto err_out;
  524. }
  525. adapter->bounce_buffer =
  526. kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
  527. if (!adapter->bounce_buffer) {
  528. netdev_err(netdev, "unable to allocate bounce buffer\n");
  529. rc = -ENOMEM;
  530. goto err_out_free_irq;
  531. }
  532. adapter->bounce_buffer_dma =
  533. dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
  534. netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
  535. if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
  536. netdev_err(netdev, "unable to map bounce buffer\n");
  537. rc = -ENOMEM;
  538. goto err_out_free_irq;
  539. }
  540. netdev_dbg(netdev, "initial replenish cycle\n");
  541. ibmveth_interrupt(netdev->irq, netdev);
  542. netif_start_queue(netdev);
  543. netdev_dbg(netdev, "open complete\n");
  544. return 0;
  545. err_out_free_irq:
  546. free_irq(netdev->irq, netdev);
  547. err_out:
  548. ibmveth_cleanup(adapter);
  549. napi_disable(&adapter->napi);
  550. return rc;
  551. }
  552. static int ibmveth_close(struct net_device *netdev)
  553. {
  554. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  555. long lpar_rc;
  556. netdev_dbg(netdev, "close starting\n");
  557. napi_disable(&adapter->napi);
  558. if (!adapter->pool_config)
  559. netif_stop_queue(netdev);
  560. h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
  561. do {
  562. lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
  563. } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
  564. if (lpar_rc != H_SUCCESS) {
  565. netdev_err(netdev, "h_free_logical_lan failed with %lx, "
  566. "continuing with close\n", lpar_rc);
  567. }
  568. free_irq(netdev->irq, netdev);
  569. adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
  570. 4096 - 8);
  571. ibmveth_cleanup(adapter);
  572. netdev_dbg(netdev, "close complete\n");
  573. return 0;
  574. }
  575. static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  576. {
  577. cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
  578. SUPPORTED_FIBRE);
  579. cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
  580. ADVERTISED_FIBRE);
  581. ethtool_cmd_speed_set(cmd, SPEED_1000);
  582. cmd->duplex = DUPLEX_FULL;
  583. cmd->port = PORT_FIBRE;
  584. cmd->phy_address = 0;
  585. cmd->transceiver = XCVR_INTERNAL;
  586. cmd->autoneg = AUTONEG_ENABLE;
  587. cmd->maxtxpkt = 0;
  588. cmd->maxrxpkt = 1;
  589. return 0;
  590. }
  591. static void netdev_get_drvinfo(struct net_device *dev,
  592. struct ethtool_drvinfo *info)
  593. {
  594. strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
  595. strncpy(info->version, ibmveth_driver_version,
  596. sizeof(info->version) - 1);
  597. }
  598. static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
  599. {
  600. /*
  601. * Since the ibmveth firmware interface does not have the
  602. * concept of separate tx/rx checksum offload enable, if rx
  603. * checksum is disabled we also have to disable tx checksum
  604. * offload. Once we disable rx checksum offload, we are no
  605. * longer allowed to send tx buffers that are not properly
  606. * checksummed.
  607. */
  608. if (!(features & NETIF_F_RXCSUM))
  609. features &= ~NETIF_F_ALL_CSUM;
  610. return features;
  611. }
  612. static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
  613. {
  614. struct ibmveth_adapter *adapter = netdev_priv(dev);
  615. unsigned long set_attr, clr_attr, ret_attr;
  616. unsigned long set_attr6, clr_attr6;
  617. long ret, ret6;
  618. int rc1 = 0, rc2 = 0;
  619. int restart = 0;
  620. if (netif_running(dev)) {
  621. restart = 1;
  622. adapter->pool_config = 1;
  623. ibmveth_close(dev);
  624. adapter->pool_config = 0;
  625. }
  626. set_attr = 0;
  627. clr_attr = 0;
  628. if (data) {
  629. set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
  630. set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
  631. } else {
  632. clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
  633. clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
  634. }
  635. ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
  636. if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
  637. !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
  638. (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
  639. ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
  640. set_attr, &ret_attr);
  641. if (ret != H_SUCCESS) {
  642. netdev_err(dev, "unable to change IPv4 checksum "
  643. "offload settings. %d rc=%ld\n",
  644. data, ret);
  645. ret = h_illan_attributes(adapter->vdev->unit_address,
  646. set_attr, clr_attr, &ret_attr);
  647. } else {
  648. adapter->fw_ipv4_csum_support = data;
  649. }
  650. ret6 = h_illan_attributes(adapter->vdev->unit_address,
  651. clr_attr6, set_attr6, &ret_attr);
  652. if (ret6 != H_SUCCESS) {
  653. netdev_err(dev, "unable to change IPv6 checksum "
  654. "offload settings. %d rc=%ld\n",
  655. data, ret);
  656. ret = h_illan_attributes(adapter->vdev->unit_address,
  657. set_attr6, clr_attr6,
  658. &ret_attr);
  659. } else
  660. adapter->fw_ipv6_csum_support = data;
  661. if (ret != H_SUCCESS || ret6 != H_SUCCESS)
  662. adapter->rx_csum = data;
  663. else
  664. rc1 = -EIO;
  665. } else {
  666. rc1 = -EIO;
  667. netdev_err(dev, "unable to change checksum offload settings."
  668. " %d rc=%ld ret_attr=%lx\n", data, ret,
  669. ret_attr);
  670. }
  671. if (restart)
  672. rc2 = ibmveth_open(dev);
  673. return rc1 ? rc1 : rc2;
  674. }
  675. static int ibmveth_set_features(struct net_device *dev, u32 features)
  676. {
  677. struct ibmveth_adapter *adapter = netdev_priv(dev);
  678. int rx_csum = !!(features & NETIF_F_RXCSUM);
  679. int rc;
  680. if (rx_csum == adapter->rx_csum)
  681. return 0;
  682. rc = ibmveth_set_csum_offload(dev, rx_csum);
  683. if (rc && !adapter->rx_csum)
  684. dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
  685. return rc;
  686. }
  687. static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
  688. {
  689. int i;
  690. if (stringset != ETH_SS_STATS)
  691. return;
  692. for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
  693. memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
  694. }
  695. static int ibmveth_get_sset_count(struct net_device *dev, int sset)
  696. {
  697. switch (sset) {
  698. case ETH_SS_STATS:
  699. return ARRAY_SIZE(ibmveth_stats);
  700. default:
  701. return -EOPNOTSUPP;
  702. }
  703. }
  704. static void ibmveth_get_ethtool_stats(struct net_device *dev,
  705. struct ethtool_stats *stats, u64 *data)
  706. {
  707. int i;
  708. struct ibmveth_adapter *adapter = netdev_priv(dev);
  709. for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
  710. data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
  711. }
  712. static const struct ethtool_ops netdev_ethtool_ops = {
  713. .get_drvinfo = netdev_get_drvinfo,
  714. .get_settings = netdev_get_settings,
  715. .get_link = ethtool_op_get_link,
  716. .get_strings = ibmveth_get_strings,
  717. .get_sset_count = ibmveth_get_sset_count,
  718. .get_ethtool_stats = ibmveth_get_ethtool_stats,
  719. };
  720. static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  721. {
  722. return -EOPNOTSUPP;
  723. }
  724. #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
  725. static int ibmveth_send(struct ibmveth_adapter *adapter,
  726. union ibmveth_buf_desc *descs)
  727. {
  728. unsigned long correlator;
  729. unsigned int retry_count;
  730. unsigned long ret;
  731. /*
  732. * The retry count sets a maximum for the number of broadcast and
  733. * multicast destinations within the system.
  734. */
  735. retry_count = 1024;
  736. correlator = 0;
  737. do {
  738. ret = h_send_logical_lan(adapter->vdev->unit_address,
  739. descs[0].desc, descs[1].desc,
  740. descs[2].desc, descs[3].desc,
  741. descs[4].desc, descs[5].desc,
  742. correlator, &correlator);
  743. } while ((ret == H_BUSY) && (retry_count--));
  744. if (ret != H_SUCCESS && ret != H_DROPPED) {
  745. netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
  746. "with rc=%ld\n", ret);
  747. return 1;
  748. }
  749. return 0;
  750. }
  751. static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
  752. struct net_device *netdev)
  753. {
  754. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  755. unsigned int desc_flags;
  756. union ibmveth_buf_desc descs[6];
  757. int last, i;
  758. int force_bounce = 0;
  759. /*
  760. * veth handles a maximum of 6 segments including the header, so
  761. * we have to linearize the skb if there are more than this.
  762. */
  763. if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
  764. netdev->stats.tx_dropped++;
  765. goto out;
  766. }
  767. /* veth can't checksum offload UDP */
  768. if (skb->ip_summed == CHECKSUM_PARTIAL &&
  769. ((skb->protocol == htons(ETH_P_IP) &&
  770. ip_hdr(skb)->protocol != IPPROTO_TCP) ||
  771. (skb->protocol == htons(ETH_P_IPV6) &&
  772. ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
  773. skb_checksum_help(skb)) {
  774. netdev_err(netdev, "tx: failed to checksum packet\n");
  775. netdev->stats.tx_dropped++;
  776. goto out;
  777. }
  778. desc_flags = IBMVETH_BUF_VALID;
  779. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  780. unsigned char *buf = skb_transport_header(skb) +
  781. skb->csum_offset;
  782. desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
  783. /* Need to zero out the checksum */
  784. buf[0] = 0;
  785. buf[1] = 0;
  786. }
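/*
 * For CHECKSUM_PARTIAL packets the checksum field is cleared and the
 * descriptors are flagged NO_CSUM | CSUM_GOOD, which appears to tell the
 * receiving side to treat the (uncomputed) checksum as already verified.
 */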
  787. retry_bounce:
  788. memset(descs, 0, sizeof(descs));
  789. /*
  790. * If a linear packet is below the rx threshold then
  791. * copy it into the static bounce buffer. This avoids the
  792. * cost of a TCE insert and remove.
  793. */
  794. if (force_bounce || (!skb_is_nonlinear(skb) &&
  795. (skb->len < tx_copybreak))) {
  796. skb_copy_from_linear_data(skb, adapter->bounce_buffer,
  797. skb->len);
  798. descs[0].fields.flags_len = desc_flags | skb->len;
  799. descs[0].fields.address = adapter->bounce_buffer_dma;
  800. if (ibmveth_send(adapter, descs)) {
  801. adapter->tx_send_failed++;
  802. netdev->stats.tx_dropped++;
  803. } else {
  804. netdev->stats.tx_packets++;
  805. netdev->stats.tx_bytes += skb->len;
  806. }
  807. goto out;
  808. }
  809. /* Map the header */
  810. descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
  811. skb_headlen(skb),
  812. DMA_TO_DEVICE);
  813. if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
  814. goto map_failed;
  815. descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
  816. /* Map the frags */
  817. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  818. unsigned long dma_addr;
  819. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  820. dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
  821. frag->page_offset, frag->size,
  822. DMA_TO_DEVICE);
  823. if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
  824. goto map_failed_frags;
  825. descs[i+1].fields.flags_len = desc_flags | frag->size;
  826. descs[i+1].fields.address = dma_addr;
  827. }
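/*
 * descs[0] covers the linear header and descs[1..5] the page fragments,
 * matching the six-descriptor limit checked at the top of this function.
 */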
  828. if (ibmveth_send(adapter, descs)) {
  829. adapter->tx_send_failed++;
  830. netdev->stats.tx_dropped++;
  831. } else {
  832. netdev->stats.tx_packets++;
  833. netdev->stats.tx_bytes += skb->len;
  834. }
  835. for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
  836. dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
  837. descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
  838. DMA_TO_DEVICE);
  839. out:
  840. dev_kfree_skb(skb);
  841. return NETDEV_TX_OK;
  842. map_failed_frags:
  843. last = i+1;
  844. for (i = 0; i < last; i++)
  845. dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
  846. descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
  847. DMA_TO_DEVICE);
  848. map_failed:
  849. if (!firmware_has_feature(FW_FEATURE_CMO))
  850. netdev_err(netdev, "tx: unable to map xmit buffer\n");
  851. adapter->tx_map_failed++;
  852. skb_linearize(skb);
  853. force_bounce = 1;
  854. goto retry_bounce;
  855. }
  856. static int ibmveth_poll(struct napi_struct *napi, int budget)
  857. {
  858. struct ibmveth_adapter *adapter =
  859. container_of(napi, struct ibmveth_adapter, napi);
  860. struct net_device *netdev = adapter->netdev;
  861. int frames_processed = 0;
  862. unsigned long lpar_rc;
  863. restart_poll:
  864. do {
  865. if (!ibmveth_rxq_pending_buffer(adapter))
  866. break;
  867. smp_rmb();
  868. if (!ibmveth_rxq_buffer_valid(adapter)) {
  869. wmb(); /* suggested by larson1 */
  870. adapter->rx_invalid_buffer++;
  871. netdev_dbg(netdev, "recycling invalid buffer\n");
  872. ibmveth_rxq_recycle_buffer(adapter);
  873. } else {
  874. struct sk_buff *skb, *new_skb;
  875. int length = ibmveth_rxq_frame_length(adapter);
  876. int offset = ibmveth_rxq_frame_offset(adapter);
  877. int csum_good = ibmveth_rxq_csum_good(adapter);
  878. skb = ibmveth_rxq_get_buffer(adapter);
  879. new_skb = NULL;
  880. if (length < rx_copybreak)
  881. new_skb = netdev_alloc_skb(netdev, length);
  882. if (new_skb) {
  883. skb_copy_to_linear_data(new_skb,
  884. skb->data + offset,
  885. length);
  886. if (rx_flush)
  887. ibmveth_flush_buffer(skb->data,
  888. length + offset);
  889. skb = new_skb;
  890. ibmveth_rxq_recycle_buffer(adapter);
  891. } else {
  892. ibmveth_rxq_harvest_buffer(adapter);
  893. skb_reserve(skb, offset);
  894. }
  895. skb_put(skb, length);
  896. skb->protocol = eth_type_trans(skb, netdev);
  897. if (csum_good)
  898. skb->ip_summed = CHECKSUM_UNNECESSARY;
  899. netif_receive_skb(skb); /* send it up */
  900. netdev->stats.rx_packets++;
  901. netdev->stats.rx_bytes += length;
  902. frames_processed++;
  903. }
  904. } while (frames_processed < budget);
  905. ibmveth_replenish_task(adapter);
  906. if (frames_processed < budget) {
  907. /* We think we are done - reenable interrupts,
  908. * then check once more to make sure we are done.
  909. */
  910. lpar_rc = h_vio_signal(adapter->vdev->unit_address,
  911. VIO_IRQ_ENABLE);
  912. BUG_ON(lpar_rc != H_SUCCESS);
  913. napi_complete(napi);
  914. if (ibmveth_rxq_pending_buffer(adapter) &&
  915. napi_reschedule(napi)) {
  916. lpar_rc = h_vio_signal(adapter->vdev->unit_address,
  917. VIO_IRQ_DISABLE);
  918. goto restart_poll;
  919. }
  920. }
  921. return frames_processed;
  922. }
  923. static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
  924. {
  925. struct net_device *netdev = dev_instance;
  926. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  927. unsigned long lpar_rc;
  928. if (napi_schedule_prep(&adapter->napi)) {
  929. lpar_rc = h_vio_signal(adapter->vdev->unit_address,
  930. VIO_IRQ_DISABLE);
  931. BUG_ON(lpar_rc != H_SUCCESS);
  932. __napi_schedule(&adapter->napi);
  933. }
  934. return IRQ_HANDLED;
  935. }
  936. static void ibmveth_set_multicast_list(struct net_device *netdev)
  937. {
  938. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  939. unsigned long lpar_rc;
  940. if ((netdev->flags & IFF_PROMISC) ||
  941. (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
  942. lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
  943. IbmVethMcastEnableRecv |
  944. IbmVethMcastDisableFiltering,
  945. 0);
  946. if (lpar_rc != H_SUCCESS) {
  947. netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
  948. "entering promisc mode\n", lpar_rc);
  949. }
  950. } else {
  951. struct netdev_hw_addr *ha;
  952. /* clear the filter table & disable filtering */
  953. lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
  954. IbmVethMcastEnableRecv |
  955. IbmVethMcastDisableFiltering |
  956. IbmVethMcastClearFilterTable,
  957. 0);
  958. if (lpar_rc != H_SUCCESS) {
  959. netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
  960. "attempting to clear filter table\n",
  961. lpar_rc);
  962. }
  963. /* add the addresses to the filter table */
  964. netdev_for_each_mc_addr(ha, netdev) {
  965. /* add the multicast address to the filter table */
  966. unsigned long mcast_addr = 0;
  967. memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
  968. lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
  969. IbmVethMcastAddFilter,
  970. mcast_addr);
  971. if (lpar_rc != H_SUCCESS) {
  972. netdev_err(netdev, "h_multicast_ctrl rc=%ld "
  973. "when adding an entry to the filter "
  974. "table\n", lpar_rc);
  975. }
  976. }
  977. /* re-enable filtering */
  978. lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
  979. IbmVethMcastEnableFiltering,
  980. 0);
  981. if (lpar_rc != H_SUCCESS) {
  982. netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
  983. "enabling filtering\n", lpar_rc);
  984. }
  985. }
  986. }
  987. static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
  988. {
  989. struct ibmveth_adapter *adapter = netdev_priv(dev);
  990. struct vio_dev *viodev = adapter->vdev;
  991. int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
  992. int i, rc;
  993. int need_restart = 0;
  994. if (new_mtu < IBMVETH_MIN_MTU)
  995. return -EINVAL;
  996. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
  997. if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
  998. break;
  999. if (i == IBMVETH_NUM_BUFF_POOLS)
  1000. return -EINVAL;
  1001. /* Deactivate all the buffer pools so that the next loop can activate
  1002. only the buffer pools necessary to hold the new MTU */
  1003. if (netif_running(adapter->netdev)) {
  1004. need_restart = 1;
  1005. adapter->pool_config = 1;
  1006. ibmveth_close(adapter->netdev);
  1007. adapter->pool_config = 0;
  1008. }
  1009. /* Look for an active buffer pool that can hold the new MTU */
  1010. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  1011. adapter->rx_buff_pool[i].active = 1;
  1012. if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
  1013. dev->mtu = new_mtu;
  1014. vio_cmo_set_dev_desired(viodev,
  1015. ibmveth_get_desired_dma
  1016. (viodev));
  1017. if (need_restart) {
  1018. return ibmveth_open(adapter->netdev);
  1019. }
  1020. return 0;
  1021. }
  1022. }
  1023. if (need_restart && (rc = ibmveth_open(adapter->netdev)))
  1024. return rc;
  1025. return -EINVAL;
  1026. }
  1027. #ifdef CONFIG_NET_POLL_CONTROLLER
  1028. static void ibmveth_poll_controller(struct net_device *dev)
  1029. {
  1030. ibmveth_replenish_task(netdev_priv(dev));
  1031. ibmveth_interrupt(dev->irq, dev);
  1032. }
  1033. #endif
  1034. /**
  1035. * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
  1036. *
  1037. * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
  1038. *
  1039. * Return value:
  1040. * Number of bytes of IO data the driver will need to perform well.
  1041. */
  1042. static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
  1043. {
  1044. struct net_device *netdev = dev_get_drvdata(&vdev->dev);
  1045. struct ibmveth_adapter *adapter;
  1046. unsigned long ret;
  1047. int i;
  1048. int rxqentries = 1;
  1049. /* netdev inits at probe time along with the structures we need below*/
  1050. if (netdev == NULL)
  1051. return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
  1052. adapter = netdev_priv(netdev);
  1053. ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
  1054. ret += IOMMU_PAGE_ALIGN(netdev->mtu);
  1055. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  1056. /* add the size of the active receive buffers */
  1057. if (adapter->rx_buff_pool[i].active)
  1058. ret +=
  1059. adapter->rx_buff_pool[i].size *
  1060. IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
  1061. buff_size);
  1062. rxqentries += adapter->rx_buff_pool[i].size;
  1063. }
  1064. /* add the size of the receive queue entries */
  1065. ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
  1066. return ret;
  1067. }
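/*
 * In other words, the desired entitlement is the buffer and filter list
 * pages, one IOMMU-page-aligned MTU-sized mapping, every active pool's
 * buffers at their IOMMU-page-aligned size, and the receive queue itself.
 */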
  1068. static const struct net_device_ops ibmveth_netdev_ops = {
  1069. .ndo_open = ibmveth_open,
  1070. .ndo_stop = ibmveth_close,
  1071. .ndo_start_xmit = ibmveth_start_xmit,
  1072. .ndo_set_multicast_list = ibmveth_set_multicast_list,
  1073. .ndo_do_ioctl = ibmveth_ioctl,
  1074. .ndo_change_mtu = ibmveth_change_mtu,
  1075. .ndo_fix_features = ibmveth_fix_features,
  1076. .ndo_set_features = ibmveth_set_features,
  1077. .ndo_validate_addr = eth_validate_addr,
  1078. .ndo_set_mac_address = eth_mac_addr,
  1079. #ifdef CONFIG_NET_POLL_CONTROLLER
  1080. .ndo_poll_controller = ibmveth_poll_controller,
  1081. #endif
  1082. };
  1083. static int __devinit ibmveth_probe(struct vio_dev *dev,
  1084. const struct vio_device_id *id)
  1085. {
  1086. int rc, i;
  1087. struct net_device *netdev;
  1088. struct ibmveth_adapter *adapter;
  1089. unsigned char *mac_addr_p;
  1090. unsigned int *mcastFilterSize_p;
  1091. dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
  1092. dev->unit_address);
  1093. mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
  1094. NULL);
  1095. if (!mac_addr_p) {
  1096. dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
  1097. return -EINVAL;
  1098. }
  1099. mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
  1100. VETH_MCAST_FILTER_SIZE, NULL);
  1101. if (!mcastFilterSize_p) {
  1102. dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
  1103. "attribute\n");
  1104. return -EINVAL;
  1105. }
  1106. netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
  1107. if (!netdev)
  1108. return -ENOMEM;
  1109. adapter = netdev_priv(netdev);
  1110. dev_set_drvdata(&dev->dev, netdev);
  1111. adapter->vdev = dev;
  1112. adapter->netdev = netdev;
  1113. adapter->mcastFilterSize = *mcastFilterSize_p;
  1114. adapter->pool_config = 0;
  1115. netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
  1116. /*
  1117. * Some older boxes running PHYP non-natively have an OF that returns
  1118. * an 8-byte local-mac-address field (and the first 2 bytes have to be
  1119. * ignored) while newer boxes' OF return a 6-byte field. Note that
  1120. * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
  1121. * The RPA doc specifies that the first byte must be 10b, so we'll
  1122. * just look for it to solve this 8 vs. 6 byte field issue
  1123. */
  1124. if ((*mac_addr_p & 0x3) != 0x02)
  1125. mac_addr_p += 2;
  1126. adapter->mac_addr = 0;
  1127. memcpy(&adapter->mac_addr, mac_addr_p, 6);
  1128. netdev->irq = dev->irq;
  1129. netdev->netdev_ops = &ibmveth_netdev_ops;
  1130. netdev->ethtool_ops = &netdev_ethtool_ops;
  1131. SET_NETDEV_DEV(netdev, &dev->dev);
  1132. netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
  1133. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
  1134. netdev->features |= netdev->hw_features;
  1135. memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
  1136. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  1137. struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
  1138. int error;
  1139. ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
  1140. pool_count[i], pool_size[i],
  1141. pool_active[i]);
  1142. error = kobject_init_and_add(kobj, &ktype_veth_pool,
  1143. &dev->dev.kobj, "pool%d", i);
  1144. if (!error)
  1145. kobject_uevent(kobj, KOBJ_ADD);
  1146. }
  1147. netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
  1148. adapter->buffer_list_dma = DMA_ERROR_CODE;
  1149. adapter->filter_list_dma = DMA_ERROR_CODE;
  1150. adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
  1151. netdev_dbg(netdev, "registering netdev...\n");
  1152. ibmveth_set_features(netdev, netdev->features);
  1153. rc = register_netdev(netdev);
  1154. if (rc) {
  1155. netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
  1156. free_netdev(netdev);
  1157. return rc;
  1158. }
  1159. netdev_dbg(netdev, "registered\n");
  1160. return 0;
  1161. }
  1162. static int __devexit ibmveth_remove(struct vio_dev *dev)
  1163. {
  1164. struct net_device *netdev = dev_get_drvdata(&dev->dev);
  1165. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  1166. int i;
  1167. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
  1168. kobject_put(&adapter->rx_buff_pool[i].kobj);
  1169. unregister_netdev(netdev);
  1170. free_netdev(netdev);
  1171. dev_set_drvdata(&dev->dev, NULL);
  1172. return 0;
  1173. }
  1174. static struct attribute veth_active_attr;
  1175. static struct attribute veth_num_attr;
  1176. static struct attribute veth_size_attr;
  1177. static ssize_t veth_pool_show(struct kobject *kobj,
  1178. struct attribute *attr, char *buf)
  1179. {
  1180. struct ibmveth_buff_pool *pool = container_of(kobj,
  1181. struct ibmveth_buff_pool,
  1182. kobj);
  1183. if (attr == &veth_active_attr)
  1184. return sprintf(buf, "%d\n", pool->active);
  1185. else if (attr == &veth_num_attr)
  1186. return sprintf(buf, "%d\n", pool->size);
  1187. else if (attr == &veth_size_attr)
  1188. return sprintf(buf, "%d\n", pool->buff_size);
  1189. return 0;
  1190. }
  1191. static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
  1192. const char *buf, size_t count)
  1193. {
  1194. struct ibmveth_buff_pool *pool = container_of(kobj,
  1195. struct ibmveth_buff_pool,
  1196. kobj);
  1197. struct net_device *netdev = dev_get_drvdata(
  1198. container_of(kobj->parent, struct device, kobj));
  1199. struct ibmveth_adapter *adapter = netdev_priv(netdev);
  1200. long value = simple_strtol(buf, NULL, 10);
  1201. long rc;
  1202. if (attr == &veth_active_attr) {
  1203. if (value && !pool->active) {
  1204. if (netif_running(netdev)) {
  1205. if (ibmveth_alloc_buffer_pool(pool)) {
  1206. netdev_err(netdev,
  1207. "unable to alloc pool\n");
  1208. return -ENOMEM;
  1209. }
  1210. pool->active = 1;
  1211. adapter->pool_config = 1;
  1212. ibmveth_close(netdev);
  1213. adapter->pool_config = 0;
  1214. if ((rc = ibmveth_open(netdev)))
  1215. return rc;
  1216. } else {
  1217. pool->active = 1;
  1218. }
  1219. } else if (!value && pool->active) {
  1220. int mtu = netdev->mtu + IBMVETH_BUFF_OH;
  1221. int i;
  1222. /* Make sure there is a buffer pool with buffers that
  1223. can hold a packet of the size of the MTU */
  1224. for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
  1225. if (pool == &adapter->rx_buff_pool[i])
  1226. continue;
  1227. if (!adapter->rx_buff_pool[i].active)
  1228. continue;
  1229. if (mtu <= adapter->rx_buff_pool[i].buff_size)
  1230. break;
  1231. }
  1232. if (i == IBMVETH_NUM_BUFF_POOLS) {
  1233. netdev_err(netdev, "no active pool >= MTU\n");
  1234. return -EPERM;
  1235. }
  1236. if (netif_running(netdev)) {
  1237. adapter->pool_config = 1;
  1238. ibmveth_close(netdev);
  1239. pool->active = 0;
  1240. adapter->pool_config = 0;
  1241. if ((rc = ibmveth_open(netdev)))
  1242. return rc;
  1243. }
  1244. pool->active = 0;
  1245. }
  1246. } else if (attr == &veth_num_attr) {
  1247. if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
  1248. return -EINVAL;
  1249. } else {
  1250. if (netif_running(netdev)) {
  1251. adapter->pool_config = 1;
  1252. ibmveth_close(netdev);
  1253. adapter->pool_config = 0;
  1254. pool->size = value;
  1255. if ((rc = ibmveth_open(netdev)))
  1256. return rc;
  1257. } else {
  1258. pool->size = value;
  1259. }
  1260. }
  1261. } else if (attr == &veth_size_attr) {
  1262. if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
  1263. return -EINVAL;
  1264. } else {
  1265. if (netif_running(netdev)) {
  1266. adapter->pool_config = 1;
  1267. ibmveth_close(netdev);
  1268. adapter->pool_config = 0;
  1269. pool->buff_size = value;
  1270. if ((rc = ibmveth_open(netdev)))
  1271. return rc;
  1272. } else {
  1273. pool->buff_size = value;
  1274. }
  1275. }
  1276. }
  1277. /* kick the interrupt handler to allocate/deallocate pools */
  1278. ibmveth_interrupt(netdev->irq, netdev);
  1279. return count;
  1280. }
  1281. #define ATTR(_name, _mode) \
  1282. struct attribute veth_##_name##_attr = { \
  1283. .name = __stringify(_name), .mode = _mode, \
  1284. };
  1285. static ATTR(active, 0644);
  1286. static ATTR(num, 0644);
  1287. static ATTR(size, 0644);
  1288. static struct attribute *veth_pool_attrs[] = {
  1289. &veth_active_attr,
  1290. &veth_num_attr,
  1291. &veth_size_attr,
  1292. NULL,
  1293. };
  1294. static const struct sysfs_ops veth_pool_ops = {
  1295. .show = veth_pool_show,
  1296. .store = veth_pool_store,
  1297. };
  1298. static struct kobj_type ktype_veth_pool = {
  1299. .release = NULL,
  1300. .sysfs_ops = &veth_pool_ops,
  1301. .default_attrs = veth_pool_attrs,
  1302. };
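/*
 * Together with the kobject_init_and_add() call in ibmveth_probe(), this
 * exposes a pool%d directory per receive buffer pool under the vio device
 * in sysfs, with "active", "num" and "size" attributes read by
 * veth_pool_show() and written by veth_pool_store() above.
 */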
  1303. static int ibmveth_resume(struct device *dev)
  1304. {
  1305. struct net_device *netdev = dev_get_drvdata(dev);
  1306. ibmveth_interrupt(netdev->irq, netdev);
  1307. return 0;
  1308. }
  1309. static struct vio_device_id ibmveth_device_table[] __devinitdata = {
  1310. { "network", "IBM,l-lan"},
  1311. { "", "" }
  1312. };
  1313. MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
  1314. static struct dev_pm_ops ibmveth_pm_ops = {
  1315. .resume = ibmveth_resume
  1316. };
  1317. static struct vio_driver ibmveth_driver = {
  1318. .id_table = ibmveth_device_table,
  1319. .probe = ibmveth_probe,
  1320. .remove = ibmveth_remove,
  1321. .get_desired_dma = ibmveth_get_desired_dma,
  1322. .driver = {
  1323. .name = ibmveth_driver_name,
  1324. .owner = THIS_MODULE,
  1325. .pm = &ibmveth_pm_ops,
  1326. }
  1327. };
  1328. static int __init ibmveth_module_init(void)
  1329. {
  1330. printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
  1331. ibmveth_driver_string, ibmveth_driver_version);
  1332. return vio_register_driver(&ibmveth_driver);
  1333. }
  1334. static void __exit ibmveth_module_exit(void)
  1335. {
  1336. vio_unregister_driver(&ibmveth_driver);
  1337. }
  1338. module_init(ibmveth_module_init);
  1339. module_exit(ibmveth_module_exit);