ccid.c

/*
 *  net/dccp/ccid.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  CCID infrastructure
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include "ccid.h"

static u8 builtin_ccids[] = {
	DCCPC_CCID2,		/* CCID2 is supported by default */
#if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE)
	DCCPC_CCID3,
#endif
};
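
/*
 * Note: DCCPC_CCID2 is TCP-like congestion control (RFC 4341) and is always
 * built in; DCCPC_CCID3 is TFRC congestion control (RFC 4342) and appears in
 * the list only when it is compiled in or built as a module.
 */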

static struct ccid_operations *ccids[CCID_MAX];
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
static atomic_t ccids_lockct = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(ccids_lock);

/*
 * The strategy is: modifications of the ccids vector are short, do not
 * sleep and are very rare, but read access should be free of any exclusive
 * locks.
 */
static void ccids_write_lock(void)
{
	spin_lock(&ccids_lock);
	while (atomic_read(&ccids_lockct) != 0) {
		spin_unlock(&ccids_lock);
		yield();
		spin_lock(&ccids_lock);
	}
}

static inline void ccids_write_unlock(void)
{
	spin_unlock(&ccids_lock);
}

static inline void ccids_read_lock(void)
{
	atomic_inc(&ccids_lockct);
	smp_mb__after_atomic_inc();
	spin_unlock_wait(&ccids_lock);
}

static inline void ccids_read_unlock(void)
{
	atomic_dec(&ccids_lockct);
}

#else
#define ccids_write_lock() do { } while(0)
#define ccids_write_unlock() do { } while(0)
#define ccids_read_lock() do { } while(0)
#define ccids_read_unlock() do { } while(0)
#endif
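
/*
 * How the pairing above works: a reader bumps ccids_lockct and then waits
 * for any in-flight writer to release ccids_lock; a writer takes ccids_lock
 * and spins (yielding) until the reader count drains to zero.  Readers thus
 * never take the spinlock themselves, keeping the hot lookup path in
 * ccid_new() free of exclusive locks, at the cost of writers busy-waiting;
 * that is acceptable since (un)registration is rare.  On UP non-preempt
 * kernels the whole scheme compiles away to no-ops.
 */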

static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
{
	struct kmem_cache *slab;
	char slab_name_fmt[32], *slab_name;
	va_list args;

	va_start(args, fmt);
	vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
	va_end(args);

	slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
	if (slab_name == NULL)
		return NULL;
	slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (slab == NULL)
		kfree(slab_name);
	return slab;
}

static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
	if (slab != NULL) {
		const char *name = kmem_cache_name(slab);

		kmem_cache_destroy(slab);
		kfree(name);
	}
}
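
/*
 * The cache name is kstrdup()ed because kmem_cache_create() stores the
 * pointer rather than copying the string, so it must outlive the cache;
 * it is recovered via kmem_cache_name() and freed on destruction.  With
 * the format strings used by ccid_register() below, the resulting caches
 * are named e.g. "ccid2_hc_rx_sock" and "ccid2_hc_tx_sock".
 */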

/* check that up to @array_len members in @ccid_array are supported */
bool ccid_support_check(u8 const *ccid_array, u8 array_len)
{
	u8 i, j, found;

	for (i = 0, found = 0; i < array_len; i++, found = 0) {
		for (j = 0; !found && j < ARRAY_SIZE(builtin_ccids); j++)
			found = (ccid_array[i] == builtin_ccids[j]);
		if (!found)
			return false;
	}
	return true;
}
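
/*
 * Example: with CCID3 compiled in, builtin_ccids is {DCCPC_CCID2,
 * DCCPC_CCID3}, so ccid_support_check() accepts {DCCPC_CCID3, DCCPC_CCID2}
 * in any order but rejects any list containing an unknown CCID number.
 */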

/**
 * ccid_get_builtin_ccids  -  Provide copy of `builtin' CCID array
 * @ccid_array: pointer to copy into
 * @array_len: value to return length into
 *
 * This function allocates memory - caller must see that it is freed after use.
 */
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
{
	*ccid_array = kmemdup(builtin_ccids, sizeof(builtin_ccids), gfp_any());
	if (*ccid_array == NULL)
		return -ENOBUFS;
	*array_len = ARRAY_SIZE(builtin_ccids);
	return 0;
}

int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	if (len < sizeof(builtin_ccids))
		return -EINVAL;

	if (put_user(sizeof(builtin_ccids), optlen) ||
	    copy_to_user(optval, builtin_ccids, sizeof(builtin_ccids)))
		return -EFAULT;
	return 0;
}
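
/*
 * From userspace this surfaces as a getsockopt() call; a minimal sketch,
 * assuming the option that routes here is DCCP_SOCKOPT_AVAILABLE_CCIDS
 * (as wired up in net/dccp/proto.c):
 *
 *	u8 avail[4];
 *	socklen_t len = sizeof(avail);
 *
 *	if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_AVAILABLE_CCIDS,
 *		       avail, &len) == 0)
 *		;	// avail[0..len-1] holds the built-in CCID numbers
 */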

int ccid_register(struct ccid_operations *ccid_ops)
{
	int err = -ENOBUFS;

	ccid_ops->ccid_hc_rx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
					       "ccid%u_hc_rx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_rx_slab == NULL)
		goto out;

	ccid_ops->ccid_hc_tx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
					       "ccid%u_hc_tx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_tx_slab == NULL)
		goto out_free_rx_slab;

	ccids_write_lock();
	err = -EEXIST;
	if (ccids[ccid_ops->ccid_id] == NULL) {
		ccids[ccid_ops->ccid_id] = ccid_ops;
		err = 0;
	}
	ccids_write_unlock();
	if (err != 0)
		goto out_free_tx_slab;

	pr_info("CCID: Registered CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
out:
	return err;
out_free_tx_slab:
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
	ccid_ops->ccid_hc_tx_slab = NULL;
	goto out;
out_free_rx_slab:
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;
	goto out;
}

EXPORT_SYMBOL_GPL(ccid_register);
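
/*
 * Registration sketch: a CCID module fills in a struct ccid_operations and
 * registers it from its module init hook.  The names below are
 * illustrative, loosely modelled on the in-tree CCID3 module
 * (net/dccp/ccids/ccid3.c):
 *
 *	static struct ccid_operations ccid3_ops = {
 *		.ccid_id		= DCCPC_CCID3,
 *		.ccid_name		= "TCP-Friendly Rate Control",
 *		.ccid_owner		= THIS_MODULE,
 *		.ccid_hc_tx_obj_size	= sizeof(struct ccid3_hc_tx_sock),
 *		.ccid_hc_rx_obj_size	= sizeof(struct ccid3_hc_rx_sock),
 *		// ... per-CCID tx/rx callbacks ...
 *	};
 *
 *	static int __init ccid3_module_init(void)
 *	{
 *		return ccid_register(&ccid3_ops);
 *	}
 *
 *	static void __exit ccid3_module_exit(void)
 *	{
 *		ccid_unregister(&ccid3_ops);
 *	}
 */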

int ccid_unregister(struct ccid_operations *ccid_ops)
{
	ccids_write_lock();
	ccids[ccid_ops->ccid_id] = NULL;
	ccids_write_unlock();

	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
	ccid_ops->ccid_hc_tx_slab = NULL;
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;

	pr_info("CCID: Unregistered CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
	return 0;
}

EXPORT_SYMBOL_GPL(ccid_unregister);

struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
{
	struct ccid_operations *ccid_ops;
	struct ccid *ccid = NULL;

	ccids_read_lock();
#ifdef CONFIG_MODULES
	if (ccids[id] == NULL) {
		/* We only try to load if in process context */
		ccids_read_unlock();
		if (gfp & GFP_ATOMIC)
			goto out;
		request_module("net-dccp-ccid-%d", id);
		ccids_read_lock();
	}
#endif
	ccid_ops = ccids[id];
	if (ccid_ops == NULL)
		goto out_unlock;

	if (!try_module_get(ccid_ops->ccid_owner))
		goto out_unlock;

	ccids_read_unlock();

	ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
				     ccid_ops->ccid_hc_tx_slab, gfp);
	if (ccid == NULL)
		goto out_module_put;
	ccid->ccid_ops = ccid_ops;
	if (rx) {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
		if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
			goto out_free_ccid;
	} else {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
		if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
			goto out_free_ccid;
	}
out:
	return ccid;
out_unlock:
	ccids_read_unlock();
	goto out;
out_free_ccid:
	kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
			     ccid_ops->ccid_hc_tx_slab, ccid);
	ccid = NULL;
out_module_put:
	module_put(ccid_ops->ccid_owner);
	goto out;
}

EXPORT_SYMBOL_GPL(ccid_new);
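
/*
 * Each slab object is laid out as a struct ccid header immediately followed
 * by the CCID-private half-connection state, which is why the memset()s
 * above start at (ccid + 1).  ccid_new() also pins the owning module via
 * try_module_get(); the reference is dropped in ccid_delete() below.
 * Callers in ccid.h wrap this as ccid_hc_rx_new()/ccid_hc_tx_new(), which
 * simply pass rx = 1 or rx = 0 respectively.
 */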

static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
{
	struct ccid_operations *ccid_ops;

	if (ccid == NULL)
		return;

	ccid_ops = ccid->ccid_ops;
	if (rx) {
		if (ccid_ops->ccid_hc_rx_exit != NULL)
			ccid_ops->ccid_hc_rx_exit(sk);
		kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
	} else {
		if (ccid_ops->ccid_hc_tx_exit != NULL)
			ccid_ops->ccid_hc_tx_exit(sk);
		kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
	}
	ccids_read_lock();
	if (ccids[ccid_ops->ccid_id] != NULL)
		module_put(ccid_ops->ccid_owner);
	ccids_read_unlock();
}
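
/*
 * The module reference taken in ccid_new() is only dropped here while the
 * CCID is still registered: if ccid_unregister() has already cleared the
 * ccids[] slot, no module_put() is issued against the operations structure.
 */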

void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, 1);
}

EXPORT_SYMBOL_GPL(ccid_hc_rx_delete);

void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, 0);
}

EXPORT_SYMBOL_GPL(ccid_hc_tx_delete);