net-sysfs.c

/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = (*format)(net, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t show_##field(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}
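
/*
 * For example, NETDEVICE_SHOW(mtu, fmt_dec) below expands to roughly:
 *
 *	static ssize_t format_mtu(const struct net_device *net, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", net->mtu);
 *	}
 *	static ssize_t show_mtu(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 *
 * so a read of /sys/class/net/<iface>/mtu goes through netdev_show() under
 * dev_base_lock.
 */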
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		if ((ret = (*set)(netdev, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}

static ssize_t show_broadcast(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);

	if (dev_isalive(net))
		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
	return -EINVAL;
}

static int change_carrier(struct net_device *net, unsigned long new_carrier)
{
	if (!netif_running(net))
		return -EINVAL;
	return dev_change_carrier(net, (bool) new_carrier);
}

static ssize_t store_carrier(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t show_carrier(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}

static ssize_t show_speed(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}

static ssize_t show_duplex(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;
			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}

static ssize_t show_dormant(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t show_operstate(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
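
/*
 * The operstates[] strings are indexed by the IF_OPER_* values (the RFC 2863
 * operational states), so e.g. IF_OPER_DORMANT reads back as "dormant".
 */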
/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
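
/*
 * A typical write path: "echo 1500 > /sys/class/net/<iface>/mtu" (an
 * illustrative device path) lands in store_mtu(); netdev_store() checks
 * CAP_NET_ADMIN in the device's namespace, parses the value and calls
 * change_mtu() -> dev_set_mtu() under the rtnl lock.
 */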
NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
	return dev_change_flags(net, (unsigned int) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}

NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}

static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t show_ifalias(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}

NETDEVICE_SHOW(group, fmt_dec);

static int change_group(struct net_device *net, unsigned long new_group)
{
	dev_set_group(net, (int) new_group);
	return 0;
}

static ssize_t store_group(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}

static struct device_attribute net_class_attributes[] = {
	__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
	__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
	__ATTR(type, S_IRUGO, show_type, NULL),
	__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
	__ATTR(address, S_IRUGO, show_address, NULL),
	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
	__ATTR(carrier, S_IRUGO | S_IWUSR, show_carrier, store_carrier),
	__ATTR(speed, S_IRUGO, show_speed, NULL),
	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
	__ATTR(operstate, S_IRUGO, show_operstate, NULL),
	__ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
	__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
	__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
	       store_tx_queue_len),
	__ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
	{}
};
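
/*
 * These class attributes are instantiated per device, so they show up as the
 * familiar files under /sys/class/net/<iface>/, e.g. .../address, .../mtu
 * and .../operstate.
 */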
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t show_##name(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
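
/*
 * For instance, NETSTAT_ENTRY(rx_packets) defines show_rx_packets(), which
 * passes offsetof(struct rtnl_link_stats64, rx_packets) to netstat_show()
 * and so prints that single u64 counter from dev_get_stats().
 */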
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
			struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 struct rx_queue_attribute *attr, const char *buf,
			 size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,			\
	struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}
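
/*
 * Typical use: writing a CPU bitmask such as "f" to
 * /sys/class/net/<iface>/queues/rx-<n>/rps_cpus (the rps_cpus attribute
 * defined below) steers RPS for that queue onto CPUs 0-3; reading the file
 * back prints the current map.
 */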
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release_work(struct work_struct *work)
{
	struct rps_dev_flow_table *table = container_of(work,
		struct rps_dev_flow_table, free_work);

	vfree(table);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
		struct rps_dev_flow_table, rcu);

	INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
	schedule_work(&table->free_work);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
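		/* e.g. count == 5: mask starts at 4 (0b100) and is smeared
		 * up to 7 (0b111), so the table is sized for 8 flow entries.
		 */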
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_RPS */

int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_rx[i].kobj);

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
			struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 struct netdev_queue_attribute *attr, const char *buf,
			 size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
	struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	       bql_set_ ## NAME);
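
/*
 * For example, BQL_ATTR(limit_max, max_limit) just below creates the writable
 * "limit_max" file whose show/store callbacks read and update
 * queue->dql.max_limit through bql_show()/bql_set(); writing "max" selects
 * DQL_MAX_LIMIT.
 */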
BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;

	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
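
/*
 * This is the transmit-side counterpart of rps_cpus: writing a CPU bitmask
 * to /sys/class/net/<iface>/queues/tx-<n>/xps_cpus tells XPS which CPUs may
 * select this tx queue, via netif_set_xps_queue().
 */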
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
};

static int netdev_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_queue *queue = net->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *net)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	net->queues_kset = kset_create_and_add("queues",
					       NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
#endif

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(net, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(net, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(net, txq, 0);
	net_rx_queue_update_kobjects(net, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *net)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	net_rx_queue_update_kobjects(net, real_rx, 0);
	netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(net->queues_kset);
#endif
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively. */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	kfree((char *)dev - dev->padded);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev;

	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
#ifdef CONFIG_SYSFS
	.dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(net);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file(struct class_attribute *class_attr)
{
	return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
	class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}