/*
 * File: socket.c
 *
 * Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/phonet.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/pn_dev.h>

static int pn_socket_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock->sk = NULL;
		sk->sk_prot->close(sk, 0);
	}
	return 0;
}

#define PN_HASHSIZE	16
#define PN_HASHMASK	(PN_HASHSIZE-1)

static struct {
	struct hlist_head hlist[PN_HASHSIZE];
	spinlock_t lock;
} pnsocks;

void __init pn_sock_init(void)
{
	unsigned i;

	for (i = 0; i < PN_HASHSIZE; i++)
		INIT_HLIST_HEAD(pnsocks.hlist + i);
	spin_lock_init(&pnsocks.lock);
}

static struct hlist_head *pn_hash_list(u16 obj)
{
	return pnsocks.hlist + (obj & PN_HASHMASK);
}

/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
	struct hlist_node *node;
	struct sock *sknode;
	struct sock *rval = NULL;
	u16 obj = pn_sockaddr_get_object(spn);
	u8 res = spn->spn_resource;
	struct hlist_head *hlist = pn_hash_list(obj);

	spin_lock_bh(&pnsocks.lock);

	sk_for_each(sknode, node, hlist) {
		struct pn_sock *pn = pn_sk(sknode);
		BUG_ON(!pn->sobject); /* unbound socket */

		if (!net_eq(sock_net(sknode), net))
			continue;
		if (pn_port(obj)) {
			/* Look up socket by port */
			if (pn_port(pn->sobject) != pn_port(obj))
				continue;
		} else {
			/* If port is zero, look up by resource */
			if (pn->resource != res)
				continue;
		}
		if (pn_addr(pn->sobject) &&
		    pn_addr(pn->sobject) != pn_addr(obj))
			continue;

		rval = sknode;
		sock_hold(sknode);
		break;
	}

	spin_unlock_bh(&pnsocks.lock);

	return rval;
}

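/*
 * Illustrative only (added commentary, not part of the original file): the
 * AF_PHONET receive path uses this lookup roughly as sketched below. The
 * helper pn_skb_get_dst_sockaddr() is assumed from <net/phonet/phonet.h>,
 * and the exact call site in af_phonet.c may differ by kernel version.
 *
 *	struct sockaddr_pn dst;
 *	struct sock *sk;
 *
 *	pn_skb_get_dst_sockaddr(skb, &dst);
 *	sk = pn_find_sock_by_sa(net, &dst);
 *	if (sk == NULL)
 *		goto drop;			// nobody bound to this object
 *	return sk_receive_skb(sk, skb, 0);	// releases the reference taken above
 */
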
/* Deliver a broadcast packet (only in bottom-half) */
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
	struct hlist_head *hlist = pnsocks.hlist;
	unsigned h;

	spin_lock(&pnsocks.lock);
	for (h = 0; h < PN_HASHSIZE; h++) {
		struct hlist_node *node;
		struct sock *sknode;

		sk_for_each(sknode, node, hlist) {
			struct sk_buff *clone;

			if (!net_eq(sock_net(sknode), net))
				continue;
			if (!sock_flag(sknode, SOCK_BROADCAST))
				continue;

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone) {
				sock_hold(sknode);
				sk_receive_skb(sknode, clone, 0);
			}
		}
		hlist++;
	}
	spin_unlock(&pnsocks.lock);
}

void pn_sock_hash(struct sock *sk)
{
	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);

	spin_lock_bh(&pnsocks.lock);
	sk_add_node(sk, hlist);
	spin_unlock_bh(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_hash);

void pn_sock_unhash(struct sock *sk)
{
	spin_lock_bh(&pnsocks.lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&pnsocks.lock);
	pn_sock_unbind_all_res(sk);
}
EXPORT_SYMBOL(pn_sock_unhash);

static DEFINE_MUTEX(port_mutex);

static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	int err;
	u16 handle;
	u8 saddr;

	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, addr, len);

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
	saddr = pn_addr(handle);
	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
		return -EADDRNOTAVAIL;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
		err = -EINVAL; /* attempt to rebind */
		goto out;
	}
	WARN_ON(sk_hashed(sk));
	mutex_lock(&port_mutex);
	err = sk->sk_prot->get_port(sk, pn_port(handle));
	if (err)
		goto out_port;

	/* get_port() sets the port, bind() sets the address if applicable */
	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
	pn->resource = spn->spn_resource;

	/* Enable RX on the socket */
	sk->sk_prot->hash(sk);
out_port:
	mutex_unlock(&port_mutex);
out:
	release_sock(sk);
	return err;
}

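/*
 * Usage sketch (added commentary, not part of the original file): from user
 * space, a Phonet datagram socket is typically bound with a sockaddr_pn.
 * Leaving spn_obj zero lets get_port() pick a dynamic port; the resource
 * value 0xD9 below is purely hypothetical, and protocol 0 is assumed to
 * select the default datagram protocol.
 *
 *	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);
 *	struct sockaddr_pn spn = {
 *		.spn_family = AF_PHONET,
 *		.spn_resource = 0xD9,
 *	};
 *	bind(fd, (struct sockaddr *)&spn, sizeof(spn));
 */
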
static int pn_socket_autobind(struct socket *sock)
{
	struct sockaddr_pn sa;
	int err;

	memset(&sa, 0, sizeof(sa));
	sa.spn_family = AF_PHONET;
	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
				sizeof(struct sockaddr_pn));
	if (err != -EINVAL)
		return err;
	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
	return 0; /* socket was already bound */
}

static int pn_socket_accept(struct socket *sock, struct socket *newsock,
				int flags)
{
	struct sock *sk = sock->sk;
	struct sock *newsk;
	int err;

	newsk = sk->sk_prot->accept(sk, flags, &err);
	if (!newsk)
		return err;

	lock_sock(newsk);
	sock_graft(newsk, newsock);
	newsock->state = SS_CONNECTED;
	release_sock(newsk);
	return 0;
}

static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
				int *sockaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	memset(addr, 0, sizeof(struct sockaddr_pn));
	addr->sa_family = AF_PHONET;
	if (!peer) /* Race with bind() here is userland's problem. */
		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
					pn->sobject);

	*sockaddr_len = sizeof(struct sockaddr_pn);
	return 0;
}

static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
					poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct pep_sock *pn = pep_sk(sk);
	unsigned int mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	switch (sk->sk_state) {
	case TCP_LISTEN:
		return hlist_empty(&pn->ackq) ? 0 : POLLIN;
	case TCP_CLOSE:
		return POLLERR;
	}

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;
	if (!skb_queue_empty(&pn->ctrlreq_queue))
		mask |= POLLPRI;
	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
		return POLLHUP;

	if (sk->sk_state == TCP_ESTABLISHED &&
			atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
			atomic_read(&pn->tx_credits))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

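/*
 * Note (added commentary): datagram sockets use the generic datagram_poll(),
 * but pipe (PEP) sockets only report POLLOUT when the peer has granted
 * flow-control credits (pn->tx_credits) in addition to the usual send-buffer
 * space check, so writers are held back until the pipe may transmit.
 */
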
static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	if (cmd == SIOCPNGETOBJECT) {
		struct net_device *dev;
		u16 handle;
		u8 saddr;

		if (get_user(handle, (__u16 __user *)arg))
			return -EFAULT;

		lock_sock(sk);
		if (sk->sk_bound_dev_if)
			dev = dev_get_by_index(sock_net(sk),
						sk->sk_bound_dev_if);
		else
			dev = phonet_device_get(sock_net(sk));
		if (dev && (dev->flags & IFF_UP))
			saddr = phonet_address_get(dev, pn_addr(handle));
		else
			saddr = PN_NO_ADDR;
		release_sock(sk);

		if (dev)
			dev_put(dev);
		if (saddr == PN_NO_ADDR)
			return -EHOSTUNREACH;

		handle = pn_object(saddr, pn_port(pn->sobject));
		return put_user(handle, (__u16 __user *)arg);
	}

	return sk->sk_prot->ioctl(sk, cmd, arg);
}

static int pn_socket_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (sock->state != SS_UNCONNECTED)
		return -EINVAL;
	if (pn_socket_autobind(sock))
		return -ENOBUFS;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE) {
		err = -EINVAL;
		goto out;
	}

	sk->sk_state = TCP_LISTEN;
	sk->sk_ack_backlog = 0;
	sk->sk_max_ack_backlog = backlog;
out:
	release_sock(sk);
	return err;
}

static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;

	if (pn_socket_autobind(sock))
		return -EAGAIN;

	return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
}

const struct proto_ops phonet_dgram_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pn_socket_getname,
	.poll		= datagram_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = sock_no_setsockopt,
	.compat_getsockopt = sock_no_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

const struct proto_ops phonet_stream_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= pn_socket_accept,
	.getname	= pn_socket_getname,
	.poll		= pn_socket_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= pn_socket_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);

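/*
 * Note (added commentary): these tables are not registered here; the
 * AF_PHONET family code selects one of them at socket creation time based
 * on the socket type/protocol, e.g. phonet_dgram_ops for datagram sockets
 * and phonet_stream_ops for the pipe (PEP) protocol. phonet_stream_ops is
 * exported because the pipe protocol can be built as a separate module.
 */
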
/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
	static int port_cur;
	struct net *net = sock_net(sk);
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn try_sa;
	struct sock *tmpsk;

	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
	try_sa.spn_family = AF_PHONET;
	WARN_ON(!mutex_is_locked(&port_mutex));
	if (!sport) {
		/* search free port */
		int port, pmin, pmax;

		phonet_get_local_port_range(&pmin, &pmax);
		for (port = pmin; port <= pmax; port++) {
			port_cur++;
			if (port_cur < pmin || port_cur > pmax)
				port_cur = pmin;

			pn_sockaddr_set_port(&try_sa, port_cur);
			tmpsk = pn_find_sock_by_sa(net, &try_sa);
			if (tmpsk == NULL) {
				sport = port_cur;
				goto found;
			} else
				sock_put(tmpsk);
		}
	} else {
		/* try to find specific port */
		pn_sockaddr_set_port(&try_sa, sport);
		tmpsk = pn_find_sock_by_sa(net, &try_sa);
		if (tmpsk == NULL)
			/* No sock there! We can use that port... */
			goto found;
		else
			sock_put(tmpsk);
	}
	/* the port must be in use already */
	return -EADDRINUSE;

found:
	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
	return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);

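/*
 * Note (added commentary): the static port_cur above acts as a rotating
 * cursor, so successive dynamic allocations cycle through the local port
 * range instead of always reusing the lowest free port. Callers must hold
 * port_mutex, which serialises the scan against concurrent binds.
 */
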
#ifdef CONFIG_PROC_FS
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	struct hlist_head *hlist = pnsocks.hlist;
	struct hlist_node *node;
	struct sock *sknode;
	unsigned h;

	for (h = 0; h < PN_HASHSIZE; h++) {
		sk_for_each(sknode, node, hlist) {
			if (!net_eq(net, sock_net(sknode)))
				continue;
			if (!pos)
				return sknode;
			pos--;
		}
		hlist++;
	}
	return NULL;
}

static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
{
	struct net *net = seq_file_net(seq);

	do
		sk = sk_next(sk);
	while (sk && !net_eq(net, sock_net(sk)));

	return sk;
}

static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(pnsocks.lock)
{
	spin_lock_bh(&pnsocks.lock);
	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_sock_get_idx(seq, 0);
	else
		sk = pn_sock_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_sock_seq_stop(struct seq_file *seq, void *v)
	__releases(pnsocks.lock)
{
	spin_unlock_bh(&pnsocks.lock);
}

static int pn_sock_seq_show(struct seq_file *seq, void *v)
{
	int len;

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%s%n", "pt loc rem rs st tx_queue rx_queue "
			" uid inode ref pointer drops", &len);
	else {
		struct sock *sk = v;
		struct pn_sock *pn = pn_sk(sk);

		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
			"%d %p %d%n",
			sk->sk_protocol, pn->sobject, 0, pn->resource,
			sk->sk_state,
			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
			sock_i_uid(sk), sock_i_ino(sk),
			atomic_read(&sk->sk_refcnt), sk,
			atomic_read(&sk->sk_drops), &len);
	}
	seq_printf(seq, "%*s\n", 127 - len, "");
	return 0;
}

static const struct seq_operations pn_sock_seq_ops = {
	.start = pn_sock_seq_start,
	.next = pn_sock_seq_next,
	.stop = pn_sock_seq_stop,
	.show = pn_sock_seq_show,
};

static int pn_sock_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pn_sock_seq_ops,
				sizeof(struct seq_net_private));
}

const struct file_operations pn_sock_seq_fops = {
	.owner = THIS_MODULE,
	.open = pn_sock_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif

static struct {
	struct sock *sk[256];
} pnres;

/*
 * Find and hold socket based on resource.
 */
struct sock *pn_find_sock_by_res(struct net *net, u8 res)
{
	struct sock *sk;

	if (!net_eq(net, &init_net))
		return NULL;

	rcu_read_lock();
	sk = rcu_dereference(pnres.sk[res]);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();
	return sk;
}

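/*
 * Illustrative only (added commentary, not part of the original file): a
 * lookup by resource returns the socket with an extra reference held, so a
 * hypothetical caller must balance it, e.g.:
 *
 *	struct sock *sk = pn_find_sock_by_res(net, res);
 *	if (sk != NULL) {
 *		... deliver or inspect ...
 *		sock_put(sk);
 *	}
 */
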
static DEFINE_MUTEX(resource_mutex);

int pn_sock_bind_res(struct sock *sk, u8 res)
{
	int ret = -EADDRINUSE;

	if (!net_eq(sock_net(sk), &init_net))
		return -ENOIOCTLCMD;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (pn_socket_autobind(sk->sk_socket))
		return -EAGAIN;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == NULL) {
		sock_hold(sk);
		rcu_assign_pointer(pnres.sk[res], sk);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);
	return ret;
}

int pn_sock_unbind_res(struct sock *sk, u8 res)
{
	int ret = -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == sk) {
		rcu_assign_pointer(pnres.sk[res], NULL);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);

	if (ret == 0) {
		synchronize_rcu();
		sock_put(sk);
	}
	return ret;
}

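/*
 * Note (added commentary): unbinding follows the usual RCU retire pattern:
 * the pointer is cleared under resource_mutex, then synchronize_rcu() waits
 * for any pn_find_sock_by_res() readers still inside their rcu_read_lock()
 * section before the reference taken at bind time is dropped with sock_put().
 */
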
void pn_sock_unbind_all_res(struct sock *sk)
{
	unsigned res, match = 0;

	mutex_lock(&resource_mutex);
	for (res = 0; res < 256; res++) {
		if (pnres.sk[res] == sk) {
			rcu_assign_pointer(pnres.sk[res], NULL);
			match++;
		}
	}
	mutex_unlock(&resource_mutex);

	if (match == 0)
		return;
	synchronize_rcu();
	while (match > 0) {
		sock_put(sk);
		match--;
	}
}

#ifdef CONFIG_PROC_FS
static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	unsigned i;

	if (!net_eq(net, &init_net))
		return NULL;

	for (i = 0; i < 256; i++) {
		if (pnres.sk[i] == NULL)
			continue;
		if (!pos)
			return pnres.sk + i;
		pos--;
	}
	return NULL;
}

static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
{
	struct net *net = seq_file_net(seq);
	unsigned i;

	BUG_ON(!net_eq(net, &init_net));

	for (i = (sk - pnres.sk) + 1; i < 256; i++)
		if (pnres.sk[i])
			return pnres.sk + i;
	return NULL;
}

static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(resource_mutex)
{
	mutex_lock(&resource_mutex);
	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock **sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_res_get_idx(seq, 0);
	else
		sk = pn_res_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_res_seq_stop(struct seq_file *seq, void *v)
	__releases(resource_mutex)
{
	mutex_unlock(&resource_mutex);
}

static int pn_res_seq_show(struct seq_file *seq, void *v)
{
	int len;

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%s%n", "rs uid inode", &len);
	else {
		struct sock **psk = v;
		struct sock *sk = *psk;

		seq_printf(seq, "%02X %5d %lu%n",
			   psk - pnres.sk, sock_i_uid(sk), sock_i_ino(sk),
			   &len);
	}
	seq_printf(seq, "%*s\n", 63 - len, "");
	return 0;
}

static const struct seq_operations pn_res_seq_ops = {
	.start = pn_res_seq_start,
	.next = pn_res_seq_next,
	.stop = pn_res_seq_stop,
	.show = pn_res_seq_show,
};

static int pn_res_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pn_res_seq_ops,
				sizeof(struct seq_net_private));
}

const struct file_operations pn_res_seq_fops = {
	.owner = THIS_MODULE,
	.open = pn_res_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif