/*
 * NETLINK Generic Netlink Family
 *
 * Authors:	Jamal Hadi Salim
 *		Thomas Graf <tgraf@suug.ch>
 *		Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>
#include <net/sock.h>
#include <net/genetlink.h>

static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);

void genl_lock(void)
{
        mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);

void genl_unlock(void)
{
        mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);

#ifdef CONFIG_LOCKDEP
int lockdep_genl_is_held(void)
{
        return lockdep_is_held(&genl_mutex);
}
EXPORT_SYMBOL(lockdep_genl_is_held);
#endif
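
/*
 * Illustrative sketch, not part of the original file: code that publishes
 * an RCU-protected pointer under genl_lock() can use lockdep_genl_is_held()
 * to have lockdep check that dependency.  The names "example_active_family"
 * and "example_get_active_family" are hypothetical.
 */
#ifdef CONFIG_LOCKDEP
static struct genl_family __rcu *example_active_family;

static inline struct genl_family *example_get_active_family(void)
{
        /* lockdep complains unless genl_mutex (or RCU) really protects us */
        return rcu_dereference_protected(example_active_family,
                                         lockdep_genl_is_held());
}
#endif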
static void genl_lock_all(void)
{
        down_write(&cb_lock);
        genl_lock();
}

static void genl_unlock_all(void)
{
        genl_unlock();
        up_write(&cb_lock);
}

#define GENL_FAM_TAB_SIZE	16
#define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)

static struct list_head family_ht[GENL_FAM_TAB_SIZE];
/*
 * Bitmap of multicast groups that are currently in use.
 *
 * To avoid an allocation at boot of just one unsigned long,
 * declare it global instead.
 * Bit 0 is marked as already used since group 0 is invalid.
 * Bit 1 is marked as already used since the drop-monitor code
 * abuses the API and thinks it can statically use group 1.
 * That group will typically conflict with other groups that
 * any proper users use.
 * Bit 16 is marked as used since it's used for generic netlink
 * and the code no longer marks pre-reserved IDs as used.
 * Bit 17 is marked as already used since the VFS quota code
 * also abused this API and relied on family == group ID, we
 * cater to that by giving it a static family and group ID.
 */
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
                                      BIT(GENL_ID_VFS_DQUOT);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;
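
/*
 * Worked example (not part of the original file): a family that registers
 * n_mcgrps groups is handed a contiguous run of bits in mc_groups and only
 * remembers the first one in family->mcgrp_offset.  Callers always pass a
 * family-relative group index; resolving it to a global group ID is just an
 * offset addition, as this hypothetical helper sketches:
 */
static inline int example_global_group_id(const struct genl_family *family,
                                          unsigned int group)
{
        /* same mapping genlmsg_multicast_allns() applies further below */
        return family->mcgrp_offset + group;
}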
static int genl_ctrl_event(int event, struct genl_family *family,
                           const struct genl_multicast_group *grp,
                           int grp_id);

static inline unsigned int genl_family_hash(unsigned int id)
{
        return id & GENL_FAM_TAB_MASK;
}

static inline struct list_head *genl_family_chain(unsigned int id)
{
        return &family_ht[genl_family_hash(id)];
}

static struct genl_family *genl_family_find_byid(unsigned int id)
{
        struct genl_family *f;

        list_for_each_entry(f, genl_family_chain(id), family_list)
                if (f->id == id)
                        return f;

        return NULL;
}

static struct genl_family *genl_family_find_byname(char *name)
{
        struct genl_family *f;
        int i;

        for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
                list_for_each_entry(f, genl_family_chain(i), family_list)
                        if (strcmp(f->name, name) == 0)
                                return f;

        return NULL;
}

static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
{
        int i;

        for (i = 0; i < family->n_ops; i++)
                if (family->ops[i].cmd == cmd)
                        return &family->ops[i];

        return NULL;
}

/* Of course we are going to have problems once we hit
 * 2^16 alive types, but that can only happen by year 2K
 */
static u16 genl_generate_id(void)
{
        static u16 id_gen_idx = GENL_MIN_ID;
        int i;

        for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
                if (id_gen_idx != GENL_ID_VFS_DQUOT &&
                    !genl_family_find_byid(id_gen_idx))
                        return id_gen_idx;

                if (++id_gen_idx > GENL_MAX_ID)
                        id_gen_idx = GENL_MIN_ID;
        }

        return 0;
}

static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
        unsigned long *new_groups;
        int start = 0;
        int i;
        int id;
        bool fits;

        do {
                if (start == 0)
                        id = find_first_zero_bit(mc_groups,
                                                 mc_groups_longs *
                                                 BITS_PER_LONG);
                else
                        id = find_next_zero_bit(mc_groups,
                                                mc_groups_longs * BITS_PER_LONG,
                                                start);

                fits = true;
                for (i = id;
                     i < min_t(int, id + n_groups,
                               mc_groups_longs * BITS_PER_LONG);
                     i++) {
                        if (test_bit(i, mc_groups)) {
                                start = i;
                                fits = false;
                                break;
                        }
                }

                if (id >= mc_groups_longs * BITS_PER_LONG) {
                        unsigned long new_longs = mc_groups_longs +
                                                  BITS_TO_LONGS(n_groups);
                        size_t nlen = new_longs * sizeof(unsigned long);

                        if (mc_groups == &mc_group_start) {
                                new_groups = kzalloc(nlen, GFP_KERNEL);
                                if (!new_groups)
                                        return -ENOMEM;
                                mc_groups = new_groups;
                                *mc_groups = mc_group_start;
                        } else {
                                new_groups = krealloc(mc_groups, nlen,
                                                      GFP_KERNEL);
                                if (!new_groups)
                                        return -ENOMEM;
                                mc_groups = new_groups;
                                for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
                                        mc_groups[mc_groups_longs + i] = 0;
                        }
                        mc_groups_longs = new_longs;
                }
        } while (!fits);

        for (i = id; i < id + n_groups; i++)
                set_bit(i, mc_groups);
        *first_id = id;
        return 0;
}

static struct genl_family genl_ctrl;

static int genl_validate_assign_mc_groups(struct genl_family *family)
{
        int first_id;
        int n_groups = family->n_mcgrps;
        int err, i;
        bool groups_allocated = false;

        if (!n_groups)
                return 0;

        for (i = 0; i < n_groups; i++) {
                const struct genl_multicast_group *grp = &family->mcgrps[i];

                if (WARN_ON(grp->name[0] == '\0'))
                        return -EINVAL;
                if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
                        return -EINVAL;
        }

        /* special-case our own group and hacks */
        if (family == &genl_ctrl) {
                first_id = GENL_ID_CTRL;
                BUG_ON(n_groups != 1);
        } else if (strcmp(family->name, "NET_DM") == 0) {
                first_id = 1;
                BUG_ON(n_groups != 1);
        } else if (strcmp(family->name, "VFS_DQUOT") == 0) {
                first_id = GENL_ID_VFS_DQUOT;
                BUG_ON(n_groups != 1);
        } else {
                groups_allocated = true;
                err = genl_allocate_reserve_groups(n_groups, &first_id);
                if (err)
                        return err;
        }

        family->mcgrp_offset = first_id;
        /* if still initializing, can't and don't need to realloc bitmaps */
        if (!init_net.genl_sock)
                return 0;

        if (family->netnsok) {
                struct net *net;

                netlink_table_grab();
                rcu_read_lock();
                for_each_net_rcu(net) {
                        err = __netlink_change_ngroups(net->genl_sock,
                                        mc_groups_longs * BITS_PER_LONG);
                        if (err) {
                                /*
                                 * No need to roll back, can only fail if
                                 * memory allocation fails and then the
                                 * number of _possible_ groups has been
                                 * increased on some sockets which is ok.
                                 */
                                break;
                        }
                }
                rcu_read_unlock();
                netlink_table_ungrab();
        } else {
                err = netlink_change_ngroups(init_net.genl_sock,
                                             mc_groups_longs * BITS_PER_LONG);
        }

        if (groups_allocated && err) {
                for (i = 0; i < family->n_mcgrps; i++)
                        clear_bit(family->mcgrp_offset + i, mc_groups);
        }

        return err;
}

static void genl_unregister_mc_groups(struct genl_family *family)
{
        struct net *net;
        int i;

        netlink_table_grab();
        rcu_read_lock();
        for_each_net_rcu(net) {
                for (i = 0; i < family->n_mcgrps; i++)
                        __netlink_clear_multicast_users(
                                net->genl_sock, family->mcgrp_offset + i);
        }
        rcu_read_unlock();
        netlink_table_ungrab();

        for (i = 0; i < family->n_mcgrps; i++) {
                int grp_id = family->mcgrp_offset + i;

                if (grp_id != 1)
                        clear_bit(grp_id, mc_groups);
                genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
                                &family->mcgrps[i], grp_id);
        }
}

static int genl_validate_ops(struct genl_family *family)
{
        const struct genl_ops *ops = family->ops;
        unsigned int n_ops = family->n_ops;
        int i, j;

        if (WARN_ON(n_ops && !ops))
                return -EINVAL;

        if (!n_ops)
                return 0;

        for (i = 0; i < n_ops; i++) {
                if (ops[i].dumpit == NULL && ops[i].doit == NULL)
                        return -EINVAL;
                for (j = i + 1; j < n_ops; j++)
                        if (ops[i].cmd == ops[j].cmd)
                                return -EINVAL;
        }

        /* family is not registered yet, so no locking needed */
        family->ops = ops;
        family->n_ops = n_ops;

        return 0;
}
/**
 * __genl_register_family - register a generic netlink family
 * @family: generic netlink family
 *
 * Registers the specified family after validating it first. Only one
 * family may be registered with the same family name or identifier.
 * The family id may equal GENL_ID_GENERATE, causing a unique id to
 * be automatically generated and assigned.
 *
 * The family's ops array must already be assigned; you can use the
 * genl_register_family_with_ops() helper function.
 *
 * Return 0 on success or a negative error code.
 */
int __genl_register_family(struct genl_family *family)
{
        int err = -EINVAL, i;

        if (family->id && family->id < GENL_MIN_ID)
                goto errout;

        if (family->id > GENL_MAX_ID)
                goto errout;

        err = genl_validate_ops(family);
        if (err)
                return err;

        genl_lock_all();

        if (genl_family_find_byname(family->name)) {
                err = -EEXIST;
                goto errout_locked;
        }

        if (family->id == GENL_ID_GENERATE) {
                u16 newid = genl_generate_id();

                if (!newid) {
                        err = -ENOMEM;
                        goto errout_locked;
                }

                family->id = newid;
        } else if (genl_family_find_byid(family->id)) {
                err = -EEXIST;
                goto errout_locked;
        }

        if (family->maxattr && !family->parallel_ops) {
                family->attrbuf = kmalloc((family->maxattr+1) *
                                          sizeof(struct nlattr *), GFP_KERNEL);
                if (family->attrbuf == NULL) {
                        err = -ENOMEM;
                        goto errout_locked;
                }
        } else
                family->attrbuf = NULL;

        err = genl_validate_assign_mc_groups(family);
        if (err)
                goto errout_locked;

        list_add_tail(&family->family_list, genl_family_chain(family->id));
        genl_unlock_all();

        /* send all events */
        genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
        for (i = 0; i < family->n_mcgrps; i++)
                genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
                                &family->mcgrps[i], family->mcgrp_offset + i);

        return 0;

errout_locked:
        genl_unlock_all();
errout:
        return err;
}
EXPORT_SYMBOL(__genl_register_family);
/**
 * genl_unregister_family - unregister generic netlink family
 * @family: generic netlink family
 *
 * Unregisters the specified family.
 *
 * Returns 0 on success or a negative error code.
 */
int genl_unregister_family(struct genl_family *family)
{
        struct genl_family *rc;

        genl_lock_all();

        genl_unregister_mc_groups(family);

        list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
                if (family->id != rc->id || strcmp(rc->name, family->name))
                        continue;

                list_del(&rc->family_list);
                family->n_ops = 0;
                genl_unlock_all();

                kfree(family->attrbuf);
                genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
                return 0;
        }

        genl_unlock_all();

        return -ENOENT;
}
EXPORT_SYMBOL(genl_unregister_family);
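
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * "example" family would typically be registered and torn down.  All of the
 * EXAMPLE_* names, the policy, the command number and the doit handler are
 * made up for the example; only the genetlink calls themselves are real.
 */
enum {
        EXAMPLE_ATTR_UNSPEC,
        EXAMPLE_ATTR_VALUE,
        __EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
        [EXAMPLE_ATTR_VALUE] = { .type = NLA_U32 },
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
        return 0;
}

static const struct genl_ops example_ops[] = {
        {
                .cmd = 1,       /* hypothetical command number */
                .doit = example_doit,
                .policy = example_policy,
        },
};

static struct genl_family example_family = {
        .id = GENL_ID_GENERATE, /* let genl_generate_id() pick an id */
        .name = "example",
        .version = 1,
        .maxattr = EXAMPLE_ATTR_MAX,
};

static inline int example_register(void)
{
        /* wires up the ops array and then calls __genl_register_family() */
        return genl_register_family_with_ops(&example_family, example_ops);
}

static inline void example_unregister(void)
{
        genl_unregister_family(&example_family);
}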
/**
 * genlmsg_put - Add generic netlink header to netlink message
 * @skb: socket buffer holding the message
 * @portid: netlink portid the message is addressed to
 * @seq: sequence number (usually the one of the sender)
 * @family: generic netlink family
 * @flags: netlink message flags
 * @cmd: generic netlink command
 *
 * Returns pointer to user-specific header
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
                  struct genl_family *family, int flags, u8 cmd)
{
        struct nlmsghdr *nlh;
        struct genlmsghdr *hdr;

        nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
                        family->hdrsize, flags);
        if (nlh == NULL)
                return NULL;

        hdr = nlmsg_data(nlh);
        hdr->cmd = cmd;
        hdr->version = family->version;
        hdr->reserved = 0;

        return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);
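
/*
 * Illustrative sketch, not part of the original file: a typical doit handler
 * builds its reply around genlmsg_put().  The command and attribute numbers
 * below stand in for hypothetical EXAMPLE_CMD_GET/EXAMPLE_ATTR_VALUE values.
 */
static inline int example_reply(struct genl_family *family,
                                struct genl_info *info, u32 value)
{
        struct sk_buff *msg;
        void *hdr;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        /* generic netlink + family header; payload attributes follow */
        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          family, 0, /* EXAMPLE_CMD_GET */ 1);
        if (!hdr)
                goto err_free;

        if (nla_put_u32(msg, /* EXAMPLE_ATTR_VALUE */ 1, value))
                goto err_cancel;

        genlmsg_end(msg, hdr);
        return genlmsg_reply(msg, info);

err_cancel:
        genlmsg_cancel(msg, hdr);
err_free:
        nlmsg_free(msg);
        return -EMSGSIZE;
}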
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        /* our ops are always const - netlink API doesn't propagate that */
        const struct genl_ops *ops = cb->data;
        int rc;

        genl_lock();
        rc = ops->dumpit(skb, cb);
        genl_unlock();
        return rc;
}

static int genl_lock_done(struct netlink_callback *cb)
{
        /* our ops are always const - netlink API doesn't propagate that */
        const struct genl_ops *ops = cb->data;
        int rc = 0;

        if (ops->done) {
                genl_lock();
                rc = ops->done(cb);
                genl_unlock();
        }
        return rc;
}

static int genl_family_rcv_msg(struct genl_family *family,
                               struct sk_buff *skb,
                               struct nlmsghdr *nlh)
{
        const struct genl_ops *ops;
        struct net *net = sock_net(skb->sk);
        struct genl_info info;
        struct genlmsghdr *hdr = nlmsg_data(nlh);
        struct nlattr **attrbuf;
        int hdrlen, err;

        /* this family doesn't exist in this netns */
        if (!family->netnsok && !net_eq(net, &init_net))
                return -ENOENT;

        hdrlen = GENL_HDRLEN + family->hdrsize;
        if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
                return -EINVAL;

        ops = genl_get_cmd(hdr->cmd, family);
        if (ops == NULL)
                return -EOPNOTSUPP;

        if ((ops->flags & GENL_ADMIN_PERM) &&
            !capable(CAP_NET_ADMIN))
                return -EPERM;

        if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
                int rc;

                if (ops->dumpit == NULL)
                        return -EOPNOTSUPP;

                if (!family->parallel_ops) {
                        struct netlink_dump_control c = {
                                .module = family->module,
                                /* we have const, but the netlink API doesn't */
                                .data = (void *)ops,
                                .dump = genl_lock_dumpit,
                                .done = genl_lock_done,
                        };

                        genl_unlock();
                        rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
                        genl_lock();

                } else {
                        struct netlink_dump_control c = {
                                .module = family->module,
                                .dump = ops->dumpit,
                                .done = ops->done,
                        };

                        rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
                }

                return rc;
        }

        if (ops->doit == NULL)
                return -EOPNOTSUPP;

        if (family->maxattr && family->parallel_ops) {
                attrbuf = kmalloc((family->maxattr+1) *
                                  sizeof(struct nlattr *), GFP_KERNEL);
                if (attrbuf == NULL)
                        return -ENOMEM;
        } else
                attrbuf = family->attrbuf;

        if (attrbuf) {
                err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
                                  ops->policy);
                if (err < 0)
                        goto out;
        }

        info.snd_seq = nlh->nlmsg_seq;
        info.snd_portid = NETLINK_CB(skb).portid;
        info.nlhdr = nlh;
        info.genlhdr = nlmsg_data(nlh);
        info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
        info.attrs = attrbuf;
        genl_info_net_set(&info, net);
        memset(&info.user_ptr, 0, sizeof(info.user_ptr));

        if (family->pre_doit) {
                err = family->pre_doit(ops, skb, &info);
                if (err)
                        goto out;
        }

        err = ops->doit(skb, &info);

        if (family->post_doit)
                family->post_doit(ops, skb, &info);

out:
        if (family->parallel_ops)
                kfree(attrbuf);

        return err;
}
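
/*
 * Illustrative sketch, not part of the original file: the pre_doit/post_doit
 * hooks invoked above let a family resolve common state once per command and
 * hand it to every doit handler via info->user_ptr[].  All "example_*" names
 * are hypothetical.
 */
static inline int example_pre_doit(const struct genl_ops *ops,
                                   struct sk_buff *skb,
                                   struct genl_info *info)
{
        /* e.g. look something up from info->attrs[] and stash it here */
        info->user_ptr[0] = NULL;
        return 0;
}

static inline void example_post_doit(const struct genl_ops *ops,
                                     struct sk_buff *skb,
                                     struct genl_info *info)
{
        /* release whatever example_pre_doit() acquired */
}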
static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct genl_family *family;
        int err;

        family = genl_family_find_byid(nlh->nlmsg_type);
        if (family == NULL)
                return -ENOENT;

        if (!family->parallel_ops)
                genl_lock();

        err = genl_family_rcv_msg(family, skb, nlh);

        if (!family->parallel_ops)
                genl_unlock();

        return err;
}

static void genl_rcv(struct sk_buff *skb)
{
        down_read(&cb_lock);
        netlink_rcv_skb(skb, &genl_rcv_msg);
        up_read(&cb_lock);
}

/**************************************************************************
 * Controller
 **************************************************************************/

static struct genl_family genl_ctrl = {
        .id = GENL_ID_CTRL,
        .name = "nlctrl",
        .version = 0x2,
        .maxattr = CTRL_ATTR_MAX,
        .netnsok = true,
};

static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
                          u32 flags, struct sk_buff *skb, u8 cmd)
{
        void *hdr;

        hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
        if (hdr == NULL)
                return -1;

        if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
            nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
            nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
            nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
            nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
                goto nla_put_failure;

        if (family->n_ops) {
                struct nlattr *nla_ops;
                int i;

                nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
                if (nla_ops == NULL)
                        goto nla_put_failure;

                for (i = 0; i < family->n_ops; i++) {
                        struct nlattr *nest;
                        const struct genl_ops *ops = &family->ops[i];
                        u32 op_flags = ops->flags;

                        if (ops->dumpit)
                                op_flags |= GENL_CMD_CAP_DUMP;
                        if (ops->doit)
                                op_flags |= GENL_CMD_CAP_DO;
                        if (ops->policy)
                                op_flags |= GENL_CMD_CAP_HASPOL;

                        nest = nla_nest_start(skb, i + 1);
                        if (nest == NULL)
                                goto nla_put_failure;

                        if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
                            nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
                                goto nla_put_failure;

                        nla_nest_end(skb, nest);
                }

                nla_nest_end(skb, nla_ops);
        }

        if (family->n_mcgrps) {
                struct nlattr *nla_grps;
                int i;

                nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
                if (nla_grps == NULL)
                        goto nla_put_failure;

                for (i = 0; i < family->n_mcgrps; i++) {
                        struct nlattr *nest;
                        const struct genl_multicast_group *grp;

                        grp = &family->mcgrps[i];

                        nest = nla_nest_start(skb, i + 1);
                        if (nest == NULL)
                                goto nla_put_failure;

                        if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
                                        family->mcgrp_offset + i) ||
                            nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
                                           grp->name))
                                goto nla_put_failure;

                        nla_nest_end(skb, nest);
                }
                nla_nest_end(skb, nla_grps);
        }

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}
static int ctrl_fill_mcgrp_info(struct genl_family *family,
                                const struct genl_multicast_group *grp,
                                int grp_id, u32 portid, u32 seq, u32 flags,
                                struct sk_buff *skb, u8 cmd)
{
        void *hdr;
        struct nlattr *nla_grps;
        struct nlattr *nest;

        hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
        if (hdr == NULL)
                return -1;

        if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
            nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
                goto nla_put_failure;

        nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
        if (nla_grps == NULL)
                goto nla_put_failure;

        nest = nla_nest_start(skb, 1);
        if (nest == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
            nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
                           grp->name))
                goto nla_put_failure;

        nla_nest_end(skb, nest);
        nla_nest_end(skb, nla_grps);

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
        int i, n = 0;
        struct genl_family *rt;
        struct net *net = sock_net(skb->sk);
        int chains_to_skip = cb->args[0];
        int fams_to_skip = cb->args[1];

        for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
                n = 0;
                list_for_each_entry(rt, genl_family_chain(i), family_list) {
                        if (!rt->netnsok && !net_eq(net, &init_net))
                                continue;
                        if (++n < fams_to_skip)
                                continue;
                        if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           skb, CTRL_CMD_NEWFAMILY) < 0)
                                goto errout;
                }

                fams_to_skip = 0;
        }

errout:
        cb->args[0] = i;
        cb->args[1] = n;

        return skb->len;
}

static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
                                             u32 portid, int seq, u8 cmd)
{
        struct sk_buff *skb;
        int err;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (skb == NULL)
                return ERR_PTR(-ENOBUFS);

        err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
        if (err < 0) {
                nlmsg_free(skb);
                return ERR_PTR(err);
        }

        return skb;
}

static struct sk_buff *
ctrl_build_mcgrp_msg(struct genl_family *family,
                     const struct genl_multicast_group *grp,
                     int grp_id, u32 portid, int seq, u8 cmd)
{
        struct sk_buff *skb;
        int err;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (skb == NULL)
                return ERR_PTR(-ENOBUFS);

        err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
                                   seq, 0, skb, cmd);
        if (err < 0) {
                nlmsg_free(skb);
                return ERR_PTR(err);
        }

        return skb;
}

static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
        [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
        [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
                                    .len = GENL_NAMSIZ - 1 },
};
static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *msg;
        struct genl_family *res = NULL;
        int err = -EINVAL;

        if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
                u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
                res = genl_family_find_byid(id);
                err = -ENOENT;
        }

        if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
                char *name;

                name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
                res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
                if (res == NULL) {
                        genl_unlock();
                        up_read(&cb_lock);
                        request_module("net-pf-%d-proto-%d-family-%s",
                                       PF_NETLINK, NETLINK_GENERIC, name);
                        down_read(&cb_lock);
                        genl_lock();
                        res = genl_family_find_byname(name);
                }
#endif
                err = -ENOENT;
        }

        if (res == NULL)
                return err;

        if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
                /* family doesn't exist here */
                return -ENOENT;
        }

        msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
                                    CTRL_CMD_NEWFAMILY);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        return genlmsg_reply(msg, info);
}

static int genl_ctrl_event(int event, struct genl_family *family,
                           const struct genl_multicast_group *grp,
                           int grp_id)
{
        struct sk_buff *msg;

        /* genl is still initialising */
        if (!init_net.genl_sock)
                return 0;

        switch (event) {
        case CTRL_CMD_NEWFAMILY:
        case CTRL_CMD_DELFAMILY:
                WARN_ON(grp);
                msg = ctrl_build_family_msg(family, 0, 0, event);
                break;
        case CTRL_CMD_NEWMCAST_GRP:
        case CTRL_CMD_DELMCAST_GRP:
                BUG_ON(!grp);
                msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
                break;
        default:
                return -EINVAL;
        }

        if (IS_ERR(msg))
                return PTR_ERR(msg);

        if (!family->netnsok) {
                genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
                                        0, GFP_KERNEL);
        } else {
                rcu_read_lock();
                genlmsg_multicast_allns(&genl_ctrl, msg, 0,
                                        0, GFP_ATOMIC);
                rcu_read_unlock();
        }

        return 0;
}

static struct genl_ops genl_ctrl_ops[] = {
        {
                .cmd = CTRL_CMD_GETFAMILY,
                .doit = ctrl_getfamily,
                .dumpit = ctrl_dumpfamily,
                .policy = ctrl_policy,
        },
};

static struct genl_multicast_group genl_ctrl_groups[] = {
        { .name = "notify", },
};
static int __net_init genl_pernet_init(struct net *net)
{
        struct netlink_kernel_cfg cfg = {
                .input = genl_rcv,
                .flags = NL_CFG_F_NONROOT_RECV,
        };

        /* we'll bump the group number right afterwards */
        net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);

        if (!net->genl_sock && net_eq(net, &init_net))
                panic("GENL: Cannot initialize generic netlink\n");

        if (!net->genl_sock)
                return -ENOMEM;

        return 0;
}

static void __net_exit genl_pernet_exit(struct net *net)
{
        netlink_kernel_release(net->genl_sock);
        net->genl_sock = NULL;
}

static struct pernet_operations genl_pernet_ops = {
        .init = genl_pernet_init,
        .exit = genl_pernet_exit,
};

static int __init genl_init(void)
{
        int i, err;

        for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
                INIT_LIST_HEAD(&family_ht[i]);

        err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
                                                   genl_ctrl_groups);
        if (err < 0)
                goto problem;

        err = register_pernet_subsys(&genl_pernet_ops);
        if (err)
                goto problem;

        return 0;

problem:
        panic("GENL: Cannot register controller: %d\n", err);
}

subsys_initcall(genl_init);
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
                         gfp_t flags)
{
        struct sk_buff *tmp;
        struct net *net, *prev = NULL;
        int err;

        for_each_net_rcu(net) {
                if (prev) {
                        tmp = skb_clone(skb, flags);
                        if (!tmp) {
                                err = -ENOMEM;
                                goto error;
                        }
                        err = nlmsg_multicast(prev->genl_sock, tmp,
                                              portid, group, flags);
                        if (err)
                                goto error;
                }

                prev = net;
        }

        return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
error:
        kfree_skb(skb);
        return err;
}

int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
                            u32 portid, unsigned int group, gfp_t flags)
{
        if (WARN_ON_ONCE(group >= family->n_mcgrps))
                return -EINVAL;
        group = family->mcgrp_offset + group;
        return genlmsg_mcast(skb, portid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);
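
/*
 * Illustrative sketch, not part of the original file: sending an event to a
 * family-relative multicast group in all network namespaces, mirroring the
 * rcu_read_lock()/GFP_ATOMIC pattern used by genl_ctrl_event() above.  The
 * command and attribute numbers are hypothetical placeholders.
 */
static inline int example_notify_allns(struct genl_family *family, u32 value)
{
        struct sk_buff *msg;
        void *hdr;
        int err;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        hdr = genlmsg_put(msg, 0, 0, family, 0, /* EXAMPLE_CMD_EVENT */ 2);
        if (!hdr || nla_put_u32(msg, /* EXAMPLE_ATTR_VALUE */ 1, value)) {
                nlmsg_free(msg);
                return -EMSGSIZE;
        }
        genlmsg_end(msg, hdr);

        /* group index 0 is translated to family->mcgrp_offset + 0 above */
        rcu_read_lock();
        err = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
        rcu_read_unlock();

        return err;
}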
void genl_notify(struct genl_family *family,
                 struct sk_buff *skb, struct net *net, u32 portid, u32 group,
                 struct nlmsghdr *nlh, gfp_t flags)
{
        struct sock *sk = net->genl_sock;
        int report = 0;

        if (nlh)
                report = nlmsg_report(nlh);

        if (WARN_ON_ONCE(group >= family->n_mcgrps))
                return;
        group = family->mcgrp_offset + group;
        nlmsg_notify(sk, skb, portid, group, report, flags);
}
EXPORT_SYMBOL(genl_notify);