wmi.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081
  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include <linux/skbuff.h>
  18. #include "core.h"
  19. #include "htc.h"
  20. #include "debug.h"
  21. #include "wmi.h"
  22. #include "mac.h"
  23. void ath10k_wmi_flush_tx(struct ath10k *ar)
  24. {
  25. int ret;
  26. ret = wait_event_timeout(ar->wmi.wq,
  27. atomic_read(&ar->wmi.pending_tx_count) == 0,
  28. 5*HZ);
  29. if (atomic_read(&ar->wmi.pending_tx_count) == 0)
  30. return;
  31. if (ret == 0)
  32. ret = -ETIMEDOUT;
  33. if (ret < 0)
  34. ath10k_warn("wmi flush failed (%d)\n", ret);
  35. }
  36. int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
  37. {
  38. int ret;
  39. ret = wait_for_completion_timeout(&ar->wmi.service_ready,
  40. WMI_SERVICE_READY_TIMEOUT_HZ);
  41. return ret;
  42. }
  43. int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
  44. {
  45. int ret;
  46. ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
  47. WMI_UNIFIED_READY_TIMEOUT_HZ);
  48. return ret;
  49. }
  50. static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
  51. {
  52. struct sk_buff *skb;
  53. u32 round_len = roundup(len, 4);
  54. skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
  55. if (!skb)
  56. return NULL;
  57. skb_reserve(skb, WMI_SKB_HEADROOM);
  58. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  59. ath10k_warn("Unaligned WMI skb\n");
  60. skb_put(skb, round_len);
  61. memset(skb->data, 0, round_len);
  62. return skb;
  63. }
  64. static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
  65. {
  66. dev_kfree_skb(skb);
  67. if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
  68. wake_up(&ar->wmi.wq);
  69. }
  70. /* WMI command API */
  71. static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
  72. enum wmi_cmd_id cmd_id)
  73. {
  74. struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
  75. struct wmi_cmd_hdr *cmd_hdr;
  76. int status;
  77. u32 cmd = 0;
  78. if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  79. return -ENOMEM;
  80. cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
  81. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  82. cmd_hdr->cmd_id = __cpu_to_le32(cmd);
  83. if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
  84. WMI_MAX_PENDING_TX_COUNT) {
  85. /* avoid using up memory when FW hangs */
  86. atomic_dec(&ar->wmi.pending_tx_count);
  87. return -EBUSY;
  88. }
  89. memset(skb_cb, 0, sizeof(*skb_cb));
  90. trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
  91. status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
  92. if (status) {
  93. dev_kfree_skb_any(skb);
  94. atomic_dec(&ar->wmi.pending_tx_count);
  95. return status;
  96. }
  97. return 0;
  98. }
/* Handle WMI_SCAN_EVENTID: follow the firmware scan state machine and
 * relay transitions to mac80211.  Remain-on-channel requests are
 * implemented on top of scans, so roc notifications are issued from
 * here too.  ar->data_lock protects ar->scan and ar->scan_channel.
 * Always returns 0.
 */
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
	ath10k_dbg(ATH10K_DBG_WMI,
		   "scan event type %d reason %d freq %d req_id %d "
		   "scan_id %d vdev_id %d\n",
		   event_type, reason, freq, req_id, scan_id, vdev_id);

	spin_lock_bh(&ar->data_lock);
	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
		/* a roc "scan" has started — tell mac80211 the channel
		 * is now usable */
		if (ar->scan.in_progress && ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
		/* the reason only affects debug output; teardown below is
		 * the same for every completion cause */
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
			break;
		case WMI_SCAN_REASON_CANCELLED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
			break;
		case WMI_SCAN_REASON_PREEMPTED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
			break;
		case WMI_SCAN_REASON_TIMEDOUT:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
			break;
		default:
			break;
		}

		ar->scan_channel = NULL;
		if (!ar->scan.in_progress) {
			/* firmware completed a scan we did not request */
			ath10k_warn("no scan requested, ignoring\n");
			break;
		}

		if (ar->scan.is_roc) {
			ath10k_offchan_tx_purge(ar);

			/* an aborted roc was already reported to mac80211
			 * by whoever requested the abort */
			if (!ar->scan.aborting)
				ieee80211_remain_on_channel_expired(ar->hw);
		} else {
			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
		}

		del_timer(&ar->scan.timeout);
		complete_all(&ar->scan.completed);
		ar->scan.in_progress = false;
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
		/* back on the operating channel — no scan channel active */
		ar->scan_channel = NULL;
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);

		/* roc waiter is only released once the requested frequency
		 * is actually reached */
		if (ar->scan.in_progress && ar->scan.is_roc &&
		    ar->scan.roc_freq == freq) {
			complete(&ar->scan.on_channel);
		}
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
		break;
	default:
		break;
	}
	spin_unlock_bh(&ar->data_lock);
	return 0;
}
  188. static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
  189. {
  190. enum ieee80211_band band;
  191. switch (phy_mode) {
  192. case MODE_11A:
  193. case MODE_11NA_HT20:
  194. case MODE_11NA_HT40:
  195. case MODE_11AC_VHT20:
  196. case MODE_11AC_VHT40:
  197. case MODE_11AC_VHT80:
  198. band = IEEE80211_BAND_5GHZ;
  199. break;
  200. case MODE_11G:
  201. case MODE_11B:
  202. case MODE_11GONLY:
  203. case MODE_11NG_HT20:
  204. case MODE_11NG_HT40:
  205. case MODE_11AC_VHT20_2G:
  206. case MODE_11AC_VHT40_2G:
  207. case MODE_11AC_VHT80_2G:
  208. default:
  209. band = IEEE80211_BAND_2GHZ;
  210. }
  211. return band;
  212. }
  213. static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
  214. {
  215. u8 rate_idx = 0;
  216. /* rate in Kbps */
  217. switch (rate) {
  218. case 1000:
  219. rate_idx = 0;
  220. break;
  221. case 2000:
  222. rate_idx = 1;
  223. break;
  224. case 5500:
  225. rate_idx = 2;
  226. break;
  227. case 11000:
  228. rate_idx = 3;
  229. break;
  230. case 6000:
  231. rate_idx = 4;
  232. break;
  233. case 9000:
  234. rate_idx = 5;
  235. break;
  236. case 12000:
  237. rate_idx = 6;
  238. break;
  239. case 18000:
  240. rate_idx = 7;
  241. break;
  242. case 24000:
  243. rate_idx = 8;
  244. break;
  245. case 36000:
  246. rate_idx = 9;
  247. break;
  248. case 48000:
  249. rate_idx = 10;
  250. break;
  251. case 54000:
  252. rate_idx = 11;
  253. break;
  254. default:
  255. break;
  256. }
  257. if (band == IEEE80211_BAND_5GHZ) {
  258. if (rate_idx > 3)
  259. /* Omit CCK rates */
  260. rate_idx -= 4;
  261. else
  262. rate_idx = 0;
  263. }
  264. return rate_idx;
  265. }
/* Handle WMI_MGMT_RX_EVENTID: convert the firmware rx descriptor into
 * an ieee80211_rx_status, strip the WMI header and pass the management
 * frame to mac80211.  Frames the firmware failed to decrypt (or that
 * missed the key cache) are dropped.  The skb is consumed in all
 * paths.  Always returns 0.
 */
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;

	channel   = __le32_to_cpu(event->hdr.channel);
	buf_len   = __le32_to_cpu(event->hdr.buf_len);
	rx_status = __le32_to_cpu(event->hdr.status);
	snr       = __le32_to_cpu(event->hdr.snr);
	phy_mode  = __le32_to_cpu(event->hdr.phy_mode);
	rate	  = __le32_to_cpu(event->hdr.rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	/* unrecoverable rx errors: drop the frame entirely */
	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	/* recoverable errors are reported to mac80211 via rx flags */
	if (rx_status & WMI_RX_STATUS_ERR_CRC)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	status->band = phy_mode_to_band(phy_mode);
	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	/* snr is relative to the noise floor; convert to absolute dBm */
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	skb_pull(skb, sizeof(event->hdr));

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* firmware already decrypted the frame; clear the protected bit so
	 * mac80211 does not try to decrypt it again */
	if (fc & IEEE80211_FCTL_PROTECTED) {
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
	}

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	/*
	 * packets from HTC come aligned to 4byte boundaries
	 * because they can originally come in along with a trailer
	 */
	skb_trim(skb, buf_len);

	ieee80211_rx(ar->hw, skb);
	return 0;
}
/* WMI_CHAN_INFO_EVENTID: not implemented yet, only logged. */
static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
}

/* WMI_ECHO_EVENTID: not implemented yet, only logged. */
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}

/* WMI_DEBUG_MESG_EVENTID: not implemented yet, only logged. */
static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
}
  340. static void ath10k_wmi_event_update_stats(struct ath10k *ar,
  341. struct sk_buff *skb)
  342. {
  343. struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
  344. ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
  345. ath10k_debug_read_target_stats(ar, ev);
  346. }
  347. static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
  348. struct sk_buff *skb)
  349. {
  350. struct wmi_vdev_start_response_event *ev;
  351. ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
  352. ev = (struct wmi_vdev_start_response_event *)skb->data;
  353. if (WARN_ON(__le32_to_cpu(ev->status)))
  354. return;
  355. complete(&ar->vdev_setup_done);
  356. }
/* WMI_VDEV_STOPPED_EVENTID: signal that vdev teardown has finished. */
static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
				       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}

/* WMI_PEER_STA_KICKOUT_EVENTID: not implemented yet, only logged. */
static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
}
  368. /*
  369. * FIXME
  370. *
  371. * We don't report to mac80211 sleep state of connected
  372. * stations. Due to this mac80211 can't fill in TIM IE
  373. * correctly.
  374. *
  375. * I know of no way of getting nullfunc frames that contain
  376. * sleep transition from connected stations - these do not
  377. * seem to be sent from the target to the host. There also
  378. * doesn't seem to be a dedicated event for that. So the
  379. * only way left to do this would be to read tim_bitmap
  380. * during SWBA.
  381. *
  382. * We could probably try using tim_bitmap from SWBA to tell
  383. * mac80211 which stations are asleep and which are not. The
  384. * problem here is calling mac80211 functions so many times
  385. * could take too long and make us miss the time to submit
  386. * the beacon to the target.
  387. *
  388. * So as a workaround we try to extend the TIM IE if there
  389. * is unicast buffered for stations with aid > 7 and fill it
  390. * in ourselves.
  391. */
/* Patch the TIM IE of a mac80211-generated beacon with the station
 * power-save bitmap reported by firmware in the SWBA event.  If the
 * beacon's partial virtual bitmap is smaller than what firmware needs
 * (aid > 7 buffered traffic), the IE is expanded in place and the
 * following IEs are shifted back.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;
		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		/* unpack the __le32 words into a byte array, LSB first */
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			__le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
			u32 v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		/* tim_len is index of last non-zero byte, plus one */
		arvif->u.ap.tim_len++;
	}

	/* locate the TIM IE: skip the 802.11 header and the beacon's
	 * fixed parameters (timestamp 8 + interval 2 + capability 2) */
	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		/* highly unlikely for mac80211 */
		ath10k_warn("no tim ie found;\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	if (pvm_len < arvif->u.ap.tim_len) {
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		/* NOTE(review): skb_put() never returns NULL (it BUGs on
		 * tailroom overrun), so the else branch below is
		 * effectively dead — presumably mac80211 beacons always
		 * carry enough tailroom here; confirm. */
		if (skb_put(bcn, expand_size)) {
			/* shift the trailing IEs back to make room */
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn("tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	/* bit 0 of bitmap control signals buffered multicast traffic */
	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}
/* Serialize the firmware-provided P2P Notice of Absence info into a
 * vendor-specific P2P IE at @data.  @len must equal the value returned
 * by ath10k_p2p_calc_noa_ie_len() for the same @noa.
 */
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	/* ctwindow and the opportunistic-PS flag are packed in one byte */
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;	/* IE length excludes EID and length bytes */
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	for (i = 0; i < noa_descriptors; i++) {
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		/* NOTE(review): duration/interval/start_time are copied
		 * verbatim — presumably both sides are little-endian
		 * here; confirm against the wmi/p2p struct definitions */
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}
  493. static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
  494. {
  495. u32 len = 0;
  496. u8 noa_descriptors = noa->num_descriptors;
  497. u8 opp_ps_info = noa->ctwindow_oppps;
  498. bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
  499. if (!noa_descriptors && !opps_enabled)
  500. return len;
  501. len += 1 + 1 + 4; /* EID + len + OUI */
  502. len += 1 + 2; /* noa attr + attr len */
  503. len += 1 + 1; /* index + oppps_ctwindow */
  504. len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
  505. return len;
  506. }
/* Append the cached P2P Notice of Absence IE to a beacon for a P2P GO
 * vdev.  When firmware signals a NoA change, the cached IE is rebuilt
 * first (or dropped if there is nothing left to advertise).
 */
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	/* NoA only applies to P2P group owners */
	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		if (!new_len)
			goto cleanup;

		/* atomic context: called from the SWBA event path */
		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		/* publish the new IE under the data lock, free the old
		 * one after dropping it.  NOTE(review): the read of
		 * noa_data below is done outside the lock — presumably
		 * all writers run in this same context; confirm. */
		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);

	return;

cleanup:
	/* nothing to advertise (or allocation failed): drop the cache */
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}
  544. static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
  545. {
  546. struct wmi_host_swba_event *ev;
  547. u32 map;
  548. int i = -1;
  549. struct wmi_bcn_info *bcn_info;
  550. struct ath10k_vif *arvif;
  551. struct wmi_bcn_tx_arg arg;
  552. struct sk_buff *bcn;
  553. int vdev_id = 0;
  554. int ret;
  555. ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
  556. ev = (struct wmi_host_swba_event *)skb->data;
  557. map = __le32_to_cpu(ev->vdev_map);
  558. ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
  559. "-vdev map 0x%x\n",
  560. ev->vdev_map);
  561. for (; map; map >>= 1, vdev_id++) {
  562. if (!(map & 0x1))
  563. continue;
  564. i++;
  565. if (i >= WMI_MAX_AP_VDEV) {
  566. ath10k_warn("swba has corrupted vdev map\n");
  567. break;
  568. }
  569. bcn_info = &ev->bcn_info[i];
  570. ath10k_dbg(ATH10K_DBG_MGMT,
  571. "-bcn_info[%d]:\n"
  572. "--tim_len %d\n"
  573. "--tim_mcast %d\n"
  574. "--tim_changed %d\n"
  575. "--tim_num_ps_pending %d\n"
  576. "--tim_bitmap 0x%08x%08x%08x%08x\n",
  577. i,
  578. __le32_to_cpu(bcn_info->tim_info.tim_len),
  579. __le32_to_cpu(bcn_info->tim_info.tim_mcast),
  580. __le32_to_cpu(bcn_info->tim_info.tim_changed),
  581. __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
  582. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
  583. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
  584. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
  585. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
  586. arvif = ath10k_get_arvif(ar, vdev_id);
  587. if (arvif == NULL) {
  588. ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
  589. continue;
  590. }
  591. bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
  592. if (!bcn) {
  593. ath10k_warn("could not get mac80211 beacon\n");
  594. continue;
  595. }
  596. ath10k_tx_h_seq_no(bcn);
  597. ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
  598. ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
  599. arg.vdev_id = arvif->vdev_id;
  600. arg.tx_rate = 0;
  601. arg.tx_power = 0;
  602. arg.bcn = bcn->data;
  603. arg.bcn_len = bcn->len;
  604. ret = ath10k_wmi_beacon_send(ar, &arg);
  605. if (ret)
  606. ath10k_warn("could not send beacon (%d)\n", ret);
  607. dev_kfree_skb_any(bcn);
  608. }
  609. }
  610. static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
  611. struct sk_buff *skb)
  612. {
  613. ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
  614. }
  615. static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
  616. {
  617. ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
  618. }
  619. static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
  620. {
  621. ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
  622. }
  623. static void ath10k_wmi_event_profile_match(struct ath10k *ar,
  624. struct sk_buff *skb)
  625. {
  626. ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
  627. }
  628. static void ath10k_wmi_event_debug_print(struct ath10k *ar,
  629. struct sk_buff *skb)
  630. {
  631. ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
  632. }
  633. static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
  634. {
  635. ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
  636. }
  637. static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
  638. struct sk_buff *skb)
  639. {
  640. ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
  641. }
  642. static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
  643. struct sk_buff *skb)
  644. {
  645. ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
  646. }
  647. static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
  648. struct sk_buff *skb)
  649. {
  650. ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
  651. }
  652. static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
  653. struct sk_buff *skb)
  654. {
  655. ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
  656. }
  657. static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
  658. struct sk_buff *skb)
  659. {
  660. ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
  661. }
  662. static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
  663. struct sk_buff *skb)
  664. {
  665. ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
  666. }
  667. static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
  668. struct sk_buff *skb)
  669. {
  670. ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
  671. }
  672. static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
  673. struct sk_buff *skb)
  674. {
  675. ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
  676. }
  677. static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
  678. struct sk_buff *skb)
  679. {
  680. ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
  681. }
/* Stub handler for WMI_GTK_REKEY_FAIL_EVENTID: the event is only logged. */
static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}
/* Stub handler for WMI_TX_DELBA_COMPLETE_EVENTID: the event is only logged. */
static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}
/* Stub handler for WMI_TX_ADDBA_COMPLETE_EVENTID: the event is only logged. */
static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}
/* Stub handler for WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: only logged. */
static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
						       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
/*
 * Handle WMI_SERVICE_READY_EVENTID - the first event the firmware sends
 * after boot.  Caches the reported hardware capabilities and firmware
 * version fields on @ar and completes ar->wmi.service_ready, unblocking
 * whoever waits on that completion.  Payload is little-endian on the wire.
 */
static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
					      struct sk_buff *skb)
{
	struct wmi_service_ready_event *ev = (void *)skb->data;

	/* A short event likely means a host/firmware ABI mismatch. */
	if (skb->len < sizeof(*ev)) {
		ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
			    skb->len, sizeof(*ev));
		return;
	}

	ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
	ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);

	/* sw_version packs major in the top byte and minor in the low 24
	 * bits; sw_version_1 packs release in the high half and build in
	 * the low half.
	 */
	ar->fw_version_major =
		(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(ev->phy_capability);

	ar->ath_common.regulatory.current_rd =
		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);

	ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
				      sizeof(ev->wmi_service_bitmap));

	/* Only fill wiphy's fw_version the first time around. */
	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	/* FIXME: it probably should be better to support this */
	if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
		ath10k_warn("target requested %d memory chunks; ignoring\n",
			    __le32_to_cpu(ev->num_mem_reqs));
	}

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->sw_version_1),
		   __le32_to_cpu(ev->abi_version),
		   __le32_to_cpu(ev->phy_capability),
		   __le32_to_cpu(ev->ht_cap_info),
		   __le32_to_cpu(ev->vht_cap_info),
		   __le32_to_cpu(ev->vht_supp_mcs),
		   __le32_to_cpu(ev->sys_cap_info),
		   __le32_to_cpu(ev->num_mem_reqs));

	complete(&ar->wmi.service_ready);
}
/*
 * Handle WMI_READY_EVENTID: record the target's MAC address and complete
 * ar->wmi.unified_ready.
 *
 * Returns 0 on success or -EINVAL if the event is shorter than expected.
 */
static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;

	if (WARN_ON(skb->len < sizeof(*ev)))
		return -EINVAL;

	memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->abi_version),
		   ev->mac_addr.addr,
		   __le32_to_cpu(ev->status));

	complete(&ar->wmi.unified_ready);
	return 0;
}
  768. static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
  769. {
  770. struct wmi_cmd_hdr *cmd_hdr;
  771. enum wmi_event_id id;
  772. u16 len;
  773. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  774. id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  775. if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  776. return;
  777. len = skb->len;
  778. trace_ath10k_wmi_event(id, skb->data, skb->len);
  779. switch (id) {
  780. case WMI_MGMT_RX_EVENTID:
  781. ath10k_wmi_event_mgmt_rx(ar, skb);
  782. /* mgmt_rx() owns the skb now! */
  783. return;
  784. case WMI_SCAN_EVENTID:
  785. ath10k_wmi_event_scan(ar, skb);
  786. break;
  787. case WMI_CHAN_INFO_EVENTID:
  788. ath10k_wmi_event_chan_info(ar, skb);
  789. break;
  790. case WMI_ECHO_EVENTID:
  791. ath10k_wmi_event_echo(ar, skb);
  792. break;
  793. case WMI_DEBUG_MESG_EVENTID:
  794. ath10k_wmi_event_debug_mesg(ar, skb);
  795. break;
  796. case WMI_UPDATE_STATS_EVENTID:
  797. ath10k_wmi_event_update_stats(ar, skb);
  798. break;
  799. case WMI_VDEV_START_RESP_EVENTID:
  800. ath10k_wmi_event_vdev_start_resp(ar, skb);
  801. break;
  802. case WMI_VDEV_STOPPED_EVENTID:
  803. ath10k_wmi_event_vdev_stopped(ar, skb);
  804. break;
  805. case WMI_PEER_STA_KICKOUT_EVENTID:
  806. ath10k_wmi_event_peer_sta_kickout(ar, skb);
  807. break;
  808. case WMI_HOST_SWBA_EVENTID:
  809. ath10k_wmi_event_host_swba(ar, skb);
  810. break;
  811. case WMI_TBTTOFFSET_UPDATE_EVENTID:
  812. ath10k_wmi_event_tbttoffset_update(ar, skb);
  813. break;
  814. case WMI_PHYERR_EVENTID:
  815. ath10k_wmi_event_phyerr(ar, skb);
  816. break;
  817. case WMI_ROAM_EVENTID:
  818. ath10k_wmi_event_roam(ar, skb);
  819. break;
  820. case WMI_PROFILE_MATCH:
  821. ath10k_wmi_event_profile_match(ar, skb);
  822. break;
  823. case WMI_DEBUG_PRINT_EVENTID:
  824. ath10k_wmi_event_debug_print(ar, skb);
  825. break;
  826. case WMI_PDEV_QVIT_EVENTID:
  827. ath10k_wmi_event_pdev_qvit(ar, skb);
  828. break;
  829. case WMI_WLAN_PROFILE_DATA_EVENTID:
  830. ath10k_wmi_event_wlan_profile_data(ar, skb);
  831. break;
  832. case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
  833. ath10k_wmi_event_rtt_measurement_report(ar, skb);
  834. break;
  835. case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
  836. ath10k_wmi_event_tsf_measurement_report(ar, skb);
  837. break;
  838. case WMI_RTT_ERROR_REPORT_EVENTID:
  839. ath10k_wmi_event_rtt_error_report(ar, skb);
  840. break;
  841. case WMI_WOW_WAKEUP_HOST_EVENTID:
  842. ath10k_wmi_event_wow_wakeup_host(ar, skb);
  843. break;
  844. case WMI_DCS_INTERFERENCE_EVENTID:
  845. ath10k_wmi_event_dcs_interference(ar, skb);
  846. break;
  847. case WMI_PDEV_TPC_CONFIG_EVENTID:
  848. ath10k_wmi_event_pdev_tpc_config(ar, skb);
  849. break;
  850. case WMI_PDEV_FTM_INTG_EVENTID:
  851. ath10k_wmi_event_pdev_ftm_intg(ar, skb);
  852. break;
  853. case WMI_GTK_OFFLOAD_STATUS_EVENTID:
  854. ath10k_wmi_event_gtk_offload_status(ar, skb);
  855. break;
  856. case WMI_GTK_REKEY_FAIL_EVENTID:
  857. ath10k_wmi_event_gtk_rekey_fail(ar, skb);
  858. break;
  859. case WMI_TX_DELBA_COMPLETE_EVENTID:
  860. ath10k_wmi_event_delba_complete(ar, skb);
  861. break;
  862. case WMI_TX_ADDBA_COMPLETE_EVENTID:
  863. ath10k_wmi_event_addba_complete(ar, skb);
  864. break;
  865. case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
  866. ath10k_wmi_event_vdev_install_key_complete(ar, skb);
  867. break;
  868. case WMI_SERVICE_READY_EVENTID:
  869. ath10k_wmi_service_ready_event_rx(ar, skb);
  870. break;
  871. case WMI_READY_EVENTID:
  872. ath10k_wmi_ready_event_rx(ar, skb);
  873. break;
  874. default:
  875. ath10k_warn("Unknown eventid: %d\n", id);
  876. break;
  877. }
  878. dev_kfree_skb(skb);
  879. }
  880. static void ath10k_wmi_event_work(struct work_struct *work)
  881. {
  882. struct ath10k *ar = container_of(work, struct ath10k,
  883. wmi.wmi_event_work);
  884. struct sk_buff *skb;
  885. for (;;) {
  886. skb = skb_dequeue(&ar->wmi.wmi_event_list);
  887. if (!skb)
  888. break;
  889. ath10k_wmi_event_process(ar, skb);
  890. }
  891. }
  892. static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
  893. {
  894. struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  895. enum wmi_event_id event_id;
  896. event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  897. /* some events require to be handled ASAP
  898. * thus can't be defered to a worker thread */
  899. switch (event_id) {
  900. case WMI_HOST_SWBA_EVENTID:
  901. case WMI_MGMT_RX_EVENTID:
  902. ath10k_wmi_event_process(ar, skb);
  903. return;
  904. default:
  905. break;
  906. }
  907. skb_queue_tail(&ar->wmi.wmi_event_list, skb);
  908. queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
  909. }
  910. /* WMI Initialization functions */
/*
 * Initialize the WMI layer state on @ar: completions for the service-ready
 * and ready handshakes, the deferred event queue and its work item.
 *
 * Always returns 0.
 */
int ath10k_wmi_attach(struct ath10k *ar)
{
	init_completion(&ar->wmi.service_ready);
	init_completion(&ar->wmi.unified_ready);
	init_waitqueue_head(&ar->wmi.wq);

	skb_queue_head_init(&ar->wmi.wmi_event_list);
	INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);

	return 0;
}
/*
 * Tear down the WMI layer: stop the event worker and drop any events still
 * queued.  Warns if TX packets are still outstanding at this point.
 */
void ath10k_wmi_detach(struct ath10k *ar)
{
	/* HTC should've drained the packets already */
	if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
		ath10k_warn("there are still pending packets\n");

	cancel_work_sync(&ar->wmi.wmi_event_work);
	skb_queue_purge(&ar->wmi.wmi_event_list);
}
/*
 * Connect the WMI control service over HTC and record the resulting
 * endpoint id in ar->wmi.eid for subsequent command transmission.
 *
 * Returns 0 on success or the ath10k_htc_connect_service() error code.
 */
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
{
	int status;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;

	/* connect to control service */
	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;

	status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ar->wmi.eid = conn_resp.eid;
	return 0;
}
  949. int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
  950. u16 rd5g, u16 ctl2g, u16 ctl5g)
  951. {
  952. struct wmi_pdev_set_regdomain_cmd *cmd;
  953. struct sk_buff *skb;
  954. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  955. if (!skb)
  956. return -ENOMEM;
  957. cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
  958. cmd->reg_domain = __cpu_to_le32(rd);
  959. cmd->reg_domain_2G = __cpu_to_le32(rd2g);
  960. cmd->reg_domain_5G = __cpu_to_le32(rd5g);
  961. cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
  962. cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  963. ath10k_dbg(ATH10K_DBG_WMI,
  964. "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
  965. rd, rd2g, rd5g, ctl2g, ctl5g);
  966. return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
  967. }
/*
 * Send WMI_PDEV_SET_CHANNEL_CMDID with the channel described by @arg.
 *
 * Passive channels are rejected with -EINVAL (NOTE(review): inferred from
 * the guard below; presumably the firmware does not accept them for this
 * command - confirm against firmware docs).
 */
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
				const struct wmi_channel_arg *arg)
{
	struct wmi_set_channel_cmd *cmd;
	struct sk_buff *skb;

	if (arg->passive)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	/* band_center_freq1 mirrors the primary frequency here */
	cmd = (struct wmi_set_channel_cmd *)skb->data;
	cmd->chan.mhz = __cpu_to_le32(arg->freq);
	cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
	cmd->chan.mode = arg->mode;
	cmd->chan.min_power = arg->min_power;
	cmd->chan.max_power = arg->max_power;
	cmd->chan.reg_power = arg->max_reg_power;
	cmd->chan.reg_classid = arg->reg_class_id;
	cmd->chan.antenna_max = arg->max_antenna_gain;

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi set channel mode %d freq %d\n",
		   arg->mode, arg->freq);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
}
  992. int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
  993. {
  994. struct wmi_pdev_suspend_cmd *cmd;
  995. struct sk_buff *skb;
  996. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  997. if (!skb)
  998. return -ENOMEM;
  999. cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
  1000. cmd->suspend_opt = WMI_PDEV_SUSPEND;
  1001. return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
  1002. }
  1003. int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
  1004. {
  1005. struct sk_buff *skb;
  1006. skb = ath10k_wmi_alloc_skb(0);
  1007. if (skb == NULL)
  1008. return -ENOMEM;
  1009. return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
  1010. }
  1011. int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
  1012. u32 value)
  1013. {
  1014. struct wmi_pdev_set_param_cmd *cmd;
  1015. struct sk_buff *skb;
  1016. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1017. if (!skb)
  1018. return -ENOMEM;
  1019. cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
  1020. cmd->param_id = __cpu_to_le32(id);
  1021. cmd->param_value = __cpu_to_le32(value);
  1022. ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
  1023. id, value);
  1024. return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
  1025. }
/*
 * Send WMI_INIT_CMDID with the host's resource configuration.  All limits
 * come from the TARGET_* constants; no host memory chunks are offered
 * (num_host_mem_chunks = 0).
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 val;

	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	/* one extra peer per vdev on top of the plain peer budget */
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);

	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);

	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);

	/* voice/video/best-effort use the low-priority rx timeout,
	 * background the high-priority one */
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);

	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);

	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);

	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)buf->data;
	cmd->num_host_mem_chunks = 0;
	memcpy(&cmd->resource_config, &config, sizeof(config));

	ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
	return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
}
/*
 * Compute the total buffer length needed for a start-scan command built
 * from @arg: the fixed command struct plus one optional TLV each for IEs,
 * channels, SSIDs and BSSIDs.
 *
 * Returns the length in bytes, or -EINVAL if a list is present without
 * its pointer or exceeds the firmware limits.
 */
static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
{
	int len;

	len = sizeof(struct wmi_start_scan_cmd);

	if (arg->ie_len) {
		if (!arg->ie)
			return -EINVAL;
		if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
			return -EINVAL;

		len += sizeof(struct wmi_ie_data);
		/* IE data is padded to a 4-byte boundary */
		len += roundup(arg->ie_len, 4);
	}

	if (arg->n_channels) {
		if (!arg->channels)
			return -EINVAL;
		/* NOTE(review): ARRAY_SIZE() is only valid if arg->channels
		 * is a fixed-size array member of wmi_start_scan_arg (it
		 * would silently misbehave on a pointer) - confirm in the
		 * header. */
		if (arg->n_channels > ARRAY_SIZE(arg->channels))
			return -EINVAL;

		len += sizeof(struct wmi_chan_list);
		len += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		if (!arg->ssids)
			return -EINVAL;
		if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
			return -EINVAL;

		len += sizeof(struct wmi_ssid_list);
		len += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		if (!arg->bssids)
			return -EINVAL;
		if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
			return -EINVAL;

		len += sizeof(struct wmi_bssid_list);
		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	return len;
}
/*
 * Build and send WMI_START_SCAN_CMDID from @arg.
 *
 * Layout: the fixed wmi_start_scan_cmd struct is followed by optional
 * tagged TLVs (channel list, SSID list, BSSID list, IE data) in that
 * order.  The offset bookkeeping in 'off' must end up exactly equal to
 * the length computed by ath10k_wmi_start_scan_calc_len(); a mismatch
 * means the two functions disagree and the command is dropped.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_start_scan(struct ath10k *ar,
			  const struct wmi_start_scan_arg *arg)
{
	struct wmi_start_scan_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	u32 scan_id;
	u32 scan_req_id;
	int off;
	int len = 0;
	int i;

	len = ath10k_wmi_start_scan_calc_len(arg);
	if (len < 0)
		return len; /* len contains error code here */

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	/* host-originated ids carry a prefix so they can be told apart
	 * from firmware-originated ones */
	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmd = (struct wmi_start_scan_cmd *)skb->data;
	cmd->scan_id            = __cpu_to_le32(scan_id);
	cmd->scan_req_id        = __cpu_to_le32(scan_req_id);
	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
	cmd->scan_priority      = __cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmd->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmd->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time          = __cpu_to_le32(arg->idle_time);
	cmd->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay        = __cpu_to_le32(arg->probe_delay);
	cmd->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);

	/* TLV list starts after fields included in the struct */
	off = sizeof(*cmd);

	if (arg->n_channels) {
		channels = (void *)skb->data + off;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i] =
				__cpu_to_le32(arg->channels[i]);

		off += sizeof(*channels);
		off += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = (void *)skb->data + off;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		off += sizeof(*ssids);
		off += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = (void *)skb->data + off;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		off += sizeof(*bssids);
		off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = (void *)skb->data + off;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		off += sizeof(*ie);
		off += roundup(arg->ie_len, 4);
	}

	/* consistency check: packing must match the precomputed length */
	if (off != skb->len) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
	return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
}
/*
 * Fill @arg with common default scan parameters (dwell times, rest times,
 * event mask, broadcast BSSID).  Callers tweak individual fields
 * afterwards as needed.  Note scan_ctrl_flags is OR'ed into, not
 * overwritten, so pre-set flags survive.
 */
void ath10k_wmi_start_scan_init(struct ath10k *ar,
				struct wmi_start_scan_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_passive = 150;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 5000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
		| WMI_SCAN_EVENT_COMPLETED
		| WMI_SCAN_EVENT_BSS_CHANNEL
		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
		| WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	/* default to the broadcast BSSID (points at a static string) */
	arg->n_bssids = 1;
	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
}
/*
 * Build and send WMI_STOP_SCAN_CMDID.
 *
 * The raw req_id/scan_id values are limited to 12 bits (0xFFF) because
 * the upper bits are occupied by the host prefixes OR'ed in below.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	u32 scan_id;
	u32 req_id;

	if (arg->req_id > 0xFFF)
		return -EINVAL;
	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	scan_id = arg->u.scan_id;
	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;

	req_id = arg->req_id;
	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;
	cmd->req_type = __cpu_to_le32(arg->req_type);
	/* vdev_id and scan_id share a union in the arg struct */
	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(req_id);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
		   arg->req_id, arg->req_type, arg->u.scan_id);

	return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
}
  1261. int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
  1262. enum wmi_vdev_type type,
  1263. enum wmi_vdev_subtype subtype,
  1264. const u8 macaddr[ETH_ALEN])
  1265. {
  1266. struct wmi_vdev_create_cmd *cmd;
  1267. struct sk_buff *skb;
  1268. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1269. if (!skb)
  1270. return -ENOMEM;
  1271. cmd = (struct wmi_vdev_create_cmd *)skb->data;
  1272. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1273. cmd->vdev_type = __cpu_to_le32(type);
  1274. cmd->vdev_subtype = __cpu_to_le32(subtype);
  1275. memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
  1276. ath10k_dbg(ATH10K_DBG_WMI,
  1277. "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
  1278. vdev_id, type, subtype, macaddr);
  1279. return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
  1280. }
  1281. int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
  1282. {
  1283. struct wmi_vdev_delete_cmd *cmd;
  1284. struct sk_buff *skb;
  1285. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1286. if (!skb)
  1287. return -ENOMEM;
  1288. cmd = (struct wmi_vdev_delete_cmd *)skb->data;
  1289. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1290. ath10k_dbg(ATH10K_DBG_WMI,
  1291. "WMI vdev delete id %d\n", vdev_id);
  1292. return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
  1293. }
/*
 * Common worker for WMI vdev start and restart: validates @arg, builds
 * the wmi_vdev_start_request_cmd and sends it under @cmd_id (which must
 * be either WMI_VDEV_START_REQUEST_CMDID or WMI_VDEV_RESTART_REQUEST_CMDID).
 *
 * Returns 0 on success, -EINVAL on argument/cmd_id validation failure or
 * -ENOMEM if the command buffer cannot be allocated.
 */
static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
					 const struct wmi_vdev_start_request_arg *arg,
					 enum wmi_cmd_id cmd_id)
{
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	const char *cmdname;
	u32 flags = 0;

	if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
	    cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
		return -EINVAL;
	/* a non-NULL ssid must have a non-zero length ... */
	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
		return -EINVAL;
	/* ... hidden-ssid operation requires an ssid ... */
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return -EINVAL;
	/* ... and it must fit into the fixed-size command field */
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
		cmdname = "start";
	else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
		cmdname = "restart";
	else
		return -EINVAL; /* should not happen, we already check cmd_id */

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
	cmd->flags           = __cpu_to_le32(flags);
	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);

	cmd->chan.band_center_freq1 =
		__cpu_to_le32(arg->channel.band_center_freq1);

	cmd->chan.mode = arg->channel.mode;
	cmd->chan.min_power = arg->channel.min_power;
	cmd->chan.max_power = arg->channel.max_power;
	cmd->chan.reg_power = arg->channel.max_reg_power;
	cmd->chan.reg_classid = arg->channel.reg_class_id;
	cmd->chan.antenna_max = arg->channel.max_antenna_gain;

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X,"
		   "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
		   arg->channel.mode, flags, arg->channel.max_power);

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
/* Start a vdev: thin wrapper around ath10k_wmi_vdev_start_restart(). */
int ath10k_wmi_vdev_start(struct ath10k *ar,
			  const struct wmi_vdev_start_request_arg *arg)
{
	return ath10k_wmi_vdev_start_restart(ar, arg,
					     WMI_VDEV_START_REQUEST_CMDID);
}
/* Restart a vdev: thin wrapper around ath10k_wmi_vdev_start_restart(). */
int ath10k_wmi_vdev_restart(struct ath10k *ar,
			    const struct wmi_vdev_start_request_arg *arg)
{
	return ath10k_wmi_vdev_start_restart(ar, arg,
					     WMI_VDEV_RESTART_REQUEST_CMDID);
}
  1363. int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
  1364. {
  1365. struct wmi_vdev_stop_cmd *cmd;
  1366. struct sk_buff *skb;
  1367. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1368. if (!skb)
  1369. return -ENOMEM;
  1370. cmd = (struct wmi_vdev_stop_cmd *)skb->data;
  1371. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1372. ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
  1373. return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
  1374. }
  1375. int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
  1376. {
  1377. struct wmi_vdev_up_cmd *cmd;
  1378. struct sk_buff *skb;
  1379. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1380. if (!skb)
  1381. return -ENOMEM;
  1382. cmd = (struct wmi_vdev_up_cmd *)skb->data;
  1383. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1384. cmd->vdev_assoc_id = __cpu_to_le32(aid);
  1385. memcpy(&cmd->vdev_bssid.addr, bssid, 6);
  1386. ath10k_dbg(ATH10K_DBG_WMI,
  1387. "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
  1388. vdev_id, aid, bssid);
  1389. return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
  1390. }
  1391. int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
  1392. {
  1393. struct wmi_vdev_down_cmd *cmd;
  1394. struct sk_buff *skb;
  1395. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1396. if (!skb)
  1397. return -ENOMEM;
  1398. cmd = (struct wmi_vdev_down_cmd *)skb->data;
  1399. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1400. ath10k_dbg(ATH10K_DBG_WMI,
  1401. "wmi mgmt vdev down id 0x%x\n", vdev_id);
  1402. return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
  1403. }
  1404. int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  1405. enum wmi_vdev_param param_id, u32 param_value)
  1406. {
  1407. struct wmi_vdev_set_param_cmd *cmd;
  1408. struct sk_buff *skb;
  1409. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1410. if (!skb)
  1411. return -ENOMEM;
  1412. cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
  1413. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1414. cmd->param_id = __cpu_to_le32(param_id);
  1415. cmd->param_value = __cpu_to_le32(param_value);
  1416. ath10k_dbg(ATH10K_DBG_WMI,
  1417. "wmi vdev id 0x%x set param %d value %d\n",
  1418. vdev_id, param_id, param_value);
  1419. return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
  1420. }
/*
 * Send WMI_VDEV_INSTALL_KEY_CMDID installing (or clearing) a key on a
 * vdev.  The variable-length key material is appended directly after the
 * fixed command struct, hence the sizeof(*cmd) + key_len allocation.
 *
 * Returns 0 on success, -EINVAL if the cipher/key_data combination is
 * inconsistent, or -ENOMEM on allocation failure.
 */
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
				const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct sk_buff *skb;

	/* CIPHER_NONE must not carry key data; any real cipher must */
	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
		return -EINVAL;
	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
	cmd->key_len       = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	/* macaddr is optional (e.g. for group keys it may be absent -
	 * NOTE(review): inferred from the NULL check, confirm semantics) */
	if (arg->macaddr)
		memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
	if (arg->key_data)
		memcpy(cmd->key_data, arg->key_data, arg->key_len);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
}
  1447. int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
  1448. const u8 peer_addr[ETH_ALEN])
  1449. {
  1450. struct wmi_peer_create_cmd *cmd;
  1451. struct sk_buff *skb;
  1452. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1453. if (!skb)
  1454. return -ENOMEM;
  1455. cmd = (struct wmi_peer_create_cmd *)skb->data;
  1456. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1457. memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
  1458. ath10k_dbg(ATH10K_DBG_WMI,
  1459. "wmi peer create vdev_id %d peer_addr %pM\n",
  1460. vdev_id, peer_addr);
  1461. return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
  1462. }
  1463. int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
  1464. const u8 peer_addr[ETH_ALEN])
  1465. {
  1466. struct wmi_peer_delete_cmd *cmd;
  1467. struct sk_buff *skb;
  1468. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1469. if (!skb)
  1470. return -ENOMEM;
  1471. cmd = (struct wmi_peer_delete_cmd *)skb->data;
  1472. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1473. memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
  1474. ath10k_dbg(ATH10K_DBG_WMI,
  1475. "wmi peer delete vdev_id %d peer_addr %pM\n",
  1476. vdev_id, peer_addr);
  1477. return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
  1478. }
  1479. int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
  1480. const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  1481. {
  1482. struct wmi_peer_flush_tids_cmd *cmd;
  1483. struct sk_buff *skb;
  1484. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1485. if (!skb)
  1486. return -ENOMEM;
  1487. cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
  1488. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1489. cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
  1490. memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
  1491. ath10k_dbg(ATH10K_DBG_WMI,
  1492. "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
  1493. vdev_id, peer_addr, tid_bitmap);
  1494. return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
  1495. }
  1496. int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
  1497. const u8 *peer_addr, enum wmi_peer_param param_id,
  1498. u32 param_value)
  1499. {
  1500. struct wmi_peer_set_param_cmd *cmd;
  1501. struct sk_buff *skb;
  1502. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1503. if (!skb)
  1504. return -ENOMEM;
  1505. cmd = (struct wmi_peer_set_param_cmd *)skb->data;
  1506. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1507. cmd->param_id = __cpu_to_le32(param_id);
  1508. cmd->param_value = __cpu_to_le32(param_value);
  1509. memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
  1510. ath10k_dbg(ATH10K_DBG_WMI,
  1511. "wmi vdev %d peer 0x%pM set param %d value %d\n",
  1512. vdev_id, peer_addr, param_id, param_value);
  1513. return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
  1514. }
  1515. int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  1516. enum wmi_sta_ps_mode psmode)
  1517. {
  1518. struct wmi_sta_powersave_mode_cmd *cmd;
  1519. struct sk_buff *skb;
  1520. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1521. if (!skb)
  1522. return -ENOMEM;
  1523. cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
  1524. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1525. cmd->sta_ps_mode = __cpu_to_le32(psmode);
  1526. ath10k_dbg(ATH10K_DBG_WMI,
  1527. "wmi set powersave id 0x%x mode %d\n",
  1528. vdev_id, psmode);
  1529. return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
  1530. }
  1531. int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  1532. enum wmi_sta_powersave_param param_id,
  1533. u32 value)
  1534. {
  1535. struct wmi_sta_powersave_param_cmd *cmd;
  1536. struct sk_buff *skb;
  1537. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1538. if (!skb)
  1539. return -ENOMEM;
  1540. cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
  1541. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1542. cmd->param_id = __cpu_to_le32(param_id);
  1543. cmd->param_value = __cpu_to_le32(value);
  1544. ath10k_dbg(ATH10K_DBG_WMI,
  1545. "wmi sta ps param vdev_id 0x%x param %d value %d\n",
  1546. vdev_id, param_id, value);
  1547. return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
  1548. }
  1549. int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  1550. enum wmi_ap_ps_peer_param param_id, u32 value)
  1551. {
  1552. struct wmi_ap_ps_peer_cmd *cmd;
  1553. struct sk_buff *skb;
  1554. if (!mac)
  1555. return -EINVAL;
  1556. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1557. if (!skb)
  1558. return -ENOMEM;
  1559. cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
  1560. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1561. cmd->param_id = __cpu_to_le32(param_id);
  1562. cmd->param_value = __cpu_to_le32(value);
  1563. memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
  1564. ath10k_dbg(ATH10K_DBG_WMI,
  1565. "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
  1566. vdev_id, param_id, value, mac);
  1567. return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
  1568. }
  1569. int ath10k_wmi_scan_chan_list(struct ath10k *ar,
  1570. const struct wmi_scan_chan_list_arg *arg)
  1571. {
  1572. struct wmi_scan_chan_list_cmd *cmd;
  1573. struct sk_buff *skb;
  1574. struct wmi_channel_arg *ch;
  1575. struct wmi_channel *ci;
  1576. int len;
  1577. int i;
  1578. len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
  1579. skb = ath10k_wmi_alloc_skb(len);
  1580. if (!skb)
  1581. return -EINVAL;
  1582. cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
  1583. cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
  1584. for (i = 0; i < arg->n_channels; i++) {
  1585. u32 flags = 0;
  1586. ch = &arg->channels[i];
  1587. ci = &cmd->chan_info[i];
  1588. if (ch->passive)
  1589. flags |= WMI_CHAN_FLAG_PASSIVE;
  1590. if (ch->allow_ibss)
  1591. flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
  1592. if (ch->allow_ht)
  1593. flags |= WMI_CHAN_FLAG_ALLOW_HT;
  1594. if (ch->allow_vht)
  1595. flags |= WMI_CHAN_FLAG_ALLOW_VHT;
  1596. if (ch->ht40plus)
  1597. flags |= WMI_CHAN_FLAG_HT40_PLUS;
  1598. ci->mhz = __cpu_to_le32(ch->freq);
  1599. ci->band_center_freq1 = __cpu_to_le32(ch->freq);
  1600. ci->band_center_freq2 = 0;
  1601. ci->min_power = ch->min_power;
  1602. ci->max_power = ch->max_power;
  1603. ci->reg_power = ch->max_reg_power;
  1604. ci->antenna_max = ch->max_antenna_gain;
  1605. ci->antenna_max = 0;
  1606. /* mode & flags share storage */
  1607. ci->mode = ch->mode;
  1608. ci->flags |= __cpu_to_le32(flags);
  1609. }
  1610. return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
  1611. }
  1612. int ath10k_wmi_peer_assoc(struct ath10k *ar,
  1613. const struct wmi_peer_assoc_complete_arg *arg)
  1614. {
  1615. struct wmi_peer_assoc_complete_cmd *cmd;
  1616. struct sk_buff *skb;
  1617. if (arg->peer_mpdu_density > 16)
  1618. return -EINVAL;
  1619. if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
  1620. return -EINVAL;
  1621. if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
  1622. return -EINVAL;
  1623. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1624. if (!skb)
  1625. return -ENOMEM;
  1626. cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
  1627. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  1628. cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
  1629. cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
  1630. cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
  1631. cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
  1632. cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
  1633. cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
  1634. cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
  1635. cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
  1636. cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
  1637. cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
  1638. cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
  1639. cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
  1640. memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);
  1641. cmd->peer_legacy_rates.num_rates =
  1642. __cpu_to_le32(arg->peer_legacy_rates.num_rates);
  1643. memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
  1644. arg->peer_legacy_rates.num_rates);
  1645. cmd->peer_ht_rates.num_rates =
  1646. __cpu_to_le32(arg->peer_ht_rates.num_rates);
  1647. memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
  1648. arg->peer_ht_rates.num_rates);
  1649. cmd->peer_vht_rates.rx_max_rate =
  1650. __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
  1651. cmd->peer_vht_rates.rx_mcs_set =
  1652. __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
  1653. cmd->peer_vht_rates.tx_max_rate =
  1654. __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
  1655. cmd->peer_vht_rates.tx_mcs_set =
  1656. __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
  1657. return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
  1658. }
  1659. int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
  1660. {
  1661. struct wmi_bcn_tx_cmd *cmd;
  1662. struct sk_buff *skb;
  1663. skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
  1664. if (!skb)
  1665. return -ENOMEM;
  1666. cmd = (struct wmi_bcn_tx_cmd *)skb->data;
  1667. cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id);
  1668. cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate);
  1669. cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
  1670. cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
  1671. memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
  1672. return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
  1673. }
  1674. static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
  1675. const struct wmi_wmm_params_arg *arg)
  1676. {
  1677. params->cwmin = __cpu_to_le32(arg->cwmin);
  1678. params->cwmax = __cpu_to_le32(arg->cwmax);
  1679. params->aifs = __cpu_to_le32(arg->aifs);
  1680. params->txop = __cpu_to_le32(arg->txop);
  1681. params->acm = __cpu_to_le32(arg->acm);
  1682. params->no_ack = __cpu_to_le32(arg->no_ack);
  1683. }
  1684. int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  1685. const struct wmi_pdev_set_wmm_params_arg *arg)
  1686. {
  1687. struct wmi_pdev_set_wmm_params *cmd;
  1688. struct sk_buff *skb;
  1689. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1690. if (!skb)
  1691. return -ENOMEM;
  1692. cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
  1693. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
  1694. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
  1695. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
  1696. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
  1697. ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
  1698. return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
  1699. }
  1700. int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
  1701. {
  1702. struct wmi_request_stats_cmd *cmd;
  1703. struct sk_buff *skb;
  1704. skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  1705. if (!skb)
  1706. return -ENOMEM;
  1707. cmd = (struct wmi_request_stats_cmd *)skb->data;
  1708. cmd->stats_id = __cpu_to_le32(stats_id);
  1709. ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
  1710. return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
  1711. }