@@ -59,13 +59,15 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 	u8 sta_id = 0;
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
+	struct iwl_tx_cmd *tx_cmd =
+		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
 	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
 
-	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
-	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+	sta_id = tx_cmd->sta_id;
+	sec_ctl = tx_cmd->sec_ctl;
 
 	switch (sec_ctl & TX_CMD_SEC_MSK) {
 	case TX_CMD_SEC_CCM:
@@ -353,11 +355,13 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 	int read_ptr = txq->q.read_ptr;
 	u8 sta_id = 0;
 	__le16 bc_ent;
+	struct iwl_tx_cmd *tx_cmd =
+		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
 	if (txq_id != trans->shrd->cmd_queue)
-		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
+		sta_id = tx_cmd->sta_id;
 
 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
 	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
@@ -773,7 +777,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 	/* and copy the data that needs to be copied */
 
-	cmd_dest = &out_cmd->cmd.payload[0];
+	cmd_dest = out_cmd->payload;
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
 		if (!cmd->len[i])
 			continue;
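
For context, the hunks above replace accesses that went through the command's nested cmd member (the per-command-type view, e.g. cmd.tx or cmd.payload) with a single cast of the generic command's payload area to struct iwl_tx_cmd, done once at the top of the function. The stand-alone sketch below only illustrates that access pattern; struct tx_cmd_sketch and struct device_cmd_sketch are simplified, hypothetical stand-ins, not the real iwl_tx_cmd / iwl_device_cmd layouts from the driver.

/*
 * Illustrative sketch, not driver code: a generic device command carries an
 * opaque payload[] area, and the code that needs TX-specific fields casts
 * that payload to the concrete command layout once, then uses typed fields.
 * All struct definitions here are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tx_cmd_sketch {			/* stand-in for struct iwl_tx_cmd */
	uint8_t sta_id;
	uint8_t sec_ctl;
	/* ... rest of the TX command would follow ... */
};

struct device_cmd_sketch {		/* stand-in for struct iwl_device_cmd */
	uint8_t hdr[4];			/* command header */
	uint8_t payload[60];		/* opaque payload area */
};

int main(void)
{
	struct device_cmd_sketch dev_cmd;

	memset(&dev_cmd, 0, sizeof(dev_cmd));

	/* Producer side: build the TX command directly in the payload area,
	 * through a typed pointer obtained by a single cast. */
	struct tx_cmd_sketch *tx_cmd =
		(struct tx_cmd_sketch *) dev_cmd.payload;
	tx_cmd->sta_id = 3;
	tx_cmd->sec_ctl = 0x01;

	/* Consumer side: mirror of "sta_id = tx_cmd->sta_id;" above --
	 * read the fields through the same typed view of the payload. */
	printf("sta_id=%u sec_ctl=%#x\n", tx_cmd->sta_id, tx_cmd->sec_ctl);
	return 0;
}

The point of the pattern is that the cast happens once per function, so later reads like tx_cmd->sta_id stay short and the payload-to-TX-command relationship is stated in one place instead of being repeated in every access.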