/* rt2x00crypto.c */
/*
	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 crypto specific routines.
 */
  21. #include <linux/kernel.h>
  22. #include <linux/module.h>
  23. #include "rt2x00.h"
  24. #include "rt2x00lib.h"
  25. enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
  26. {
  27. switch (key->alg) {
  28. case ALG_WEP:
  29. if (key->keylen == LEN_WEP40)
  30. return CIPHER_WEP64;
  31. else
  32. return CIPHER_WEP128;
  33. case ALG_TKIP:
  34. return CIPHER_TKIP;
  35. case ALG_CCMP:
  36. return CIPHER_AES;
  37. default:
  38. return CIPHER_NONE;
  39. }
  40. }
  41. void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
  42. struct txentry_desc *txdesc)
  43. {
  44. struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
  45. struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
  46. struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
  47. if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) ||
  48. !hw_key || entry->skb->do_not_encrypt)
  49. return;
  50. __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
  51. txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
  52. if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
  53. __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
  54. txdesc->key_idx = hw_key->hw_key_idx;
  55. txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
  56. if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
  57. __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
  58. if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
  59. __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
  60. }
  61. unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
  62. struct sk_buff *skb)
  63. {
  64. struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
  65. struct ieee80211_key_conf *key = tx_info->control.hw_key;
  66. unsigned int overhead = 0;
  67. if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) ||
  68. !key || skb->do_not_encrypt)
  69. return overhead;
  70. /*
  71. * Extend frame length to include IV/EIV/ICV/MMIC,
  72. * note that these lengths should only be added when
  73. * mac80211 does not generate it.
  74. */
  75. overhead += key->icv_len;
  76. if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
  77. overhead += key->iv_len;
  78. if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
  79. if (key->alg == ALG_TKIP)
  80. overhead += 8;
  81. }
  82. return overhead;
  83. }
  84. void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, unsigned int iv_len)
  85. {
  86. struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
  87. unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
  88. if (unlikely(!iv_len))
  89. return;
  90. /* Copy IV/EIV data */
  91. memcpy(skbdesc->iv, skb->data + header_length, iv_len);
  92. }
  93. void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len)
  94. {
  95. struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
  96. unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
  97. if (unlikely(!iv_len))
  98. return;
  99. /* Copy IV/EIV data */
  100. memcpy(skbdesc->iv, skb->data + header_length, iv_len);
  101. /* Move ieee80211 header */
  102. memmove(skb->data + iv_len, skb->data, header_length);
  103. /* Pull buffer to correct size */
  104. skb_pull(skb, iv_len);
  105. /* IV/EIV data has officially be stripped */
  106. skbdesc->flags |= FRAME_DESC_IV_STRIPPED;
  107. }
  108. void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
  109. {
  110. struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
  111. unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
  112. const unsigned int iv_len =
  113. ((!!(skbdesc->iv[0])) * 4) + ((!!(skbdesc->iv[1])) * 4);
  114. if (!(skbdesc->flags & FRAME_DESC_IV_STRIPPED))
  115. return;
  116. skb_push(skb, iv_len);
  117. /* Move ieee80211 header */
  118. memmove(skb->data, skb->data + iv_len, header_length);
  119. /* Copy IV/EIV data */
  120. memcpy(skb->data + header_length, skbdesc->iv, iv_len);
  121. /* IV/EIV data has returned into the frame */
  122. skbdesc->flags &= ~FRAME_DESC_IV_STRIPPED;
  123. }
  124. void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
  125. unsigned int header_length,
  126. struct rxdone_entry_desc *rxdesc)
  127. {
  128. unsigned int payload_len = rxdesc->size - header_length;
  129. unsigned int iv_len;
  130. unsigned int icv_len;
  131. unsigned int transfer = 0;
  132. /*
  133. * WEP64/WEP128: Provides IV & ICV
  134. * TKIP: Provides IV/EIV & ICV
  135. * AES: Provies IV/EIV & ICV
  136. */
  137. switch (rxdesc->cipher) {
  138. case CIPHER_WEP64:
  139. case CIPHER_WEP128:
  140. iv_len = 4;
  141. icv_len = 4;
  142. break;
  143. case CIPHER_TKIP:
  144. iv_len = 8;
  145. icv_len = 4;
  146. break;
  147. case CIPHER_AES:
  148. iv_len = 8;
  149. icv_len = 8;
  150. break;
  151. default:
  152. /* Unsupport type */
  153. return;
  154. }
  155. /*
  156. * Make room for new data, note that we increase both
  157. * headsize and tailsize when required. The tailsize is
  158. * only needed when ICV data needs to be inserted and
  159. * the padding is smaller than the ICV data.
  160. * When alignment requirements is greater than the
  161. * ICV data we must trim the skb to the correct size
  162. * because we need to remove the extra bytes.
  163. */
  164. skb_push(skb, iv_len + align);
  165. if (align < icv_len)
  166. skb_put(skb, icv_len - align);
  167. else if (align > icv_len)
  168. skb_trim(skb, rxdesc->size + iv_len + icv_len);
  169. /* Move ieee80211 header */
  170. memmove(skb->data + transfer,
  171. skb->data + transfer + iv_len + align,
  172. header_length);
  173. transfer += header_length;
  174. /* Copy IV/EIV data */
  175. memcpy(skb->data + transfer, rxdesc->iv, iv_len);
  176. transfer += iv_len;
  177. /* Move payload */
  178. if (align) {
  179. memmove(skb->data + transfer,
  180. skb->data + transfer + align,
  181. payload_len);
  182. }
  183. /*
  184. * NOTE: Always count the payload as transfered,
  185. * even when alignment was set to zero. This is required
  186. * for determining the correct offset for the ICV data.
  187. */
  188. transfer += payload_len;
  189. /*
  190. * Copy ICV data
  191. * AES appends 8 bytes, we can't fill the upper
  192. * 4 bytes, but mac80211 doesn't care about what
  193. * we provide here anyway and strips it immediately.
  194. */
  195. memcpy(skb->data + transfer, &rxdesc->icv, 4);
  196. transfer += icv_len;
  197. /* IV/EIV/ICV has been inserted into frame */
  198. rxdesc->size = transfer;
  199. rxdesc->flags &= ~RX_FLAG_IV_STRIPPED;
  200. }