/* net/ipv4/tcp_fastopen.c */
  1. #include <linux/err.h>
  2. #include <linux/init.h>
  3. #include <linux/kernel.h>
  4. #include <linux/list.h>
  5. #include <linux/tcp.h>
  6. #include <linux/rcupdate.h>
  7. #include <linux/rculist.h>
  8. #include <net/inetpeer.h>
  9. #include <net/tcp.h>
/* TCP Fast Open sysctl knob; defaults to client-side support only. */
int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

/* Current cookie-generation context. Readers access it under RCU;
 * writers replace it while holding tcp_fastopen_ctx_lock.
 */
struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

/* Serializes updates of tcp_fastopen_ctx (readers need only RCU). */
static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
  13. void tcp_fastopen_init_key_once(bool publish)
  14. {
  15. static u8 key[TCP_FASTOPEN_KEY_LENGTH];
  16. /* tcp_fastopen_reset_cipher publishes the new context
  17. * atomically, so we allow this race happening here.
  18. *
  19. * All call sites of tcp_fastopen_cookie_gen also check
  20. * for a valid cookie, so this is an acceptable risk.
  21. */
  22. if (net_get_random_once(key, sizeof(key)) && publish)
  23. tcp_fastopen_reset_cipher(key, sizeof(key));
  24. }
  25. static void tcp_fastopen_ctx_free(struct rcu_head *head)
  26. {
  27. struct tcp_fastopen_context *ctx =
  28. container_of(head, struct tcp_fastopen_context, rcu);
  29. crypto_free_cipher(ctx->tfm);
  30. kfree(ctx);
  31. }
  32. int tcp_fastopen_reset_cipher(void *key, unsigned int len)
  33. {
  34. int err;
  35. struct tcp_fastopen_context *ctx, *octx;
  36. ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
  37. if (!ctx)
  38. return -ENOMEM;
  39. ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
  40. if (IS_ERR(ctx->tfm)) {
  41. err = PTR_ERR(ctx->tfm);
  42. error: kfree(ctx);
  43. pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
  44. return err;
  45. }
  46. err = crypto_cipher_setkey(ctx->tfm, key, len);
  47. if (err) {
  48. pr_err("TCP: TFO cipher key error: %d\n", err);
  49. crypto_free_cipher(ctx->tfm);
  50. goto error;
  51. }
  52. memcpy(ctx->key, key, len);
  53. spin_lock(&tcp_fastopen_ctx_lock);
  54. octx = rcu_dereference_protected(tcp_fastopen_ctx,
  55. lockdep_is_held(&tcp_fastopen_ctx_lock));
  56. rcu_assign_pointer(tcp_fastopen_ctx, ctx);
  57. spin_unlock(&tcp_fastopen_ctx_lock);
  58. if (octx)
  59. call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
  60. return err;
  61. }
  62. /* Computes the fastopen cookie for the IP path.
  63. * The path is a 128 bits long (pad with zeros for IPv4).
  64. *
  65. * The caller must check foc->len to determine if a valid cookie
  66. * has been generated successfully.
  67. */
  68. void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
  69. struct tcp_fastopen_cookie *foc)
  70. {
  71. __be32 path[4] = { src, dst, 0, 0 };
  72. struct tcp_fastopen_context *ctx;
  73. tcp_fastopen_init_key_once(true);
  74. rcu_read_lock();
  75. ctx = rcu_dereference(tcp_fastopen_ctx);
  76. if (ctx) {
  77. crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
  78. foc->len = TCP_FASTOPEN_COOKIE_SIZE;
  79. }
  80. rcu_read_unlock();
  81. }