
Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

John W. Linville · 13 years ago · commit fd2841c5b5

+ 38 - 2
drivers/net/wireless/iwlwifi/dvm/commands.h

@@ -190,6 +190,44 @@ enum {
 	REPLY_MAX = 0xff
 };
 
+/*
+ * Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate
+ *  - 4 standard TX queues
+ *  - the command queue
+ *  - 4 PAN TX queues
+ *  - the PAN multicast queue, and
+ *  - the AUX (TX during scan dwell) queue.
+ */
+#define IWL_MIN_NUM_QUEUES	11
+
+/*
+ * Command queue depends on iPAN support.
+ */
+#define IWL_DEFAULT_CMD_QUEUE_NUM	4
+#define IWL_IPAN_CMD_QUEUE_NUM		9
+
+#define IWL_TX_FIFO_BK		0	/* shared */
+#define IWL_TX_FIFO_BE		1
+#define IWL_TX_FIFO_VI		2	/* shared */
+#define IWL_TX_FIFO_VO		3
+#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN	4
+#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN	5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX		5
+#define IWL_TX_FIFO_UNUSED	255
+
+#define IWLAGN_CMD_FIFO_NUM	7
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE	8
+
 /******************************************************************************
  * (0)
  * Commonly used structures and definitions:
@@ -755,8 +793,6 @@ struct iwl_qosparam_cmd {
 #define IWLAGN_BROADCAST_ID	15
 #define	IWLAGN_STATION_COUNT	16
 
-#define	IWL_INVALID_STATION 	255
-#define IWL_MAX_TID_COUNT	8
 #define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
 
 #define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
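
The comment moved into commands.h above spells out the queue budget behind IWL_MIN_NUM_QUEUES. A standalone check of that arithmetic (illustrative only, not part of the patch):

/* Queue budget documented above: 4 AC queues, the command queue,
 * 4 PAN AC queues, the PAN multicast queue and the AUX queue. */
#include <assert.h>

#define IWL_MIN_NUM_QUEUES	11

int main(void)
{
	int standard_tx = 4;	/* VO/VI/BE/BK */
	int cmd_queue   = 1;	/* host command queue */
	int pan_tx      = 4;	/* PAN copies of the four ACs */
	int pan_mcast   = 1;	/* IWL_IPAN_MCAST_QUEUE */
	int aux         = 1;	/* TX during scan dwell */

	assert(standard_tx + cmd_queue + pan_tx + pan_mcast + aux ==
	       IWL_MIN_NUM_QUEUES);
	return 0;
}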

+ 5 - 1
drivers/net/wireless/iwlwifi/dvm/debugfs.c

@@ -83,7 +83,7 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
 #define DEBUGFS_READ_FILE_OPS(name)                                     \
 	DEBUGFS_READ_FUNC(name);                                        \
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
-	.read = iwl_dbgfs_##name##_read,                       		\
+	.read = iwl_dbgfs_##name##_read,				\
 	.open = simple_open,						\
 	.llseek = generic_file_llseek,					\
 };
@@ -2255,6 +2255,10 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
 	char buf[8];
 	int buf_size;
 
+	/* check that the interface is up */
+	if (!iwl_is_ready(priv))
+		return -EAGAIN;
+
 	memset(buf, 0, sizeof(buf));
 	buf_size = min(count, sizeof(buf) -  1);
 	if (copy_from_user(buf, user_buf, buf_size))

+ 0 - 16
drivers/net/wireless/iwlwifi/dvm/dev.h

@@ -90,22 +90,6 @@
 
 #define IWL_NUM_SCAN_RATES         (2)
 
-/*
- * Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate
- *  - 4 standard TX queues
- *  - the command queue
- *  - 4 PAN TX queues
- *  - the PAN multicast queue, and
- *  - the AUX (TX during scan dwell) queue.
- */
-#define IWL_MIN_NUM_QUEUES	11
-
-/*
- * Command queue depends on iPAN support.
- */
-#define IWL_DEFAULT_CMD_QUEUE_NUM	4
-#define IWL_IPAN_CMD_QUEUE_NUM		9
 
 #define IEEE80211_DATA_LEN              2304
 #define IEEE80211_4ADDR_LEN             30

+ 1 - 55
drivers/net/wireless/iwlwifi/dvm/main.c

@@ -518,49 +518,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
  * queue/FIFO/AC mapping definitions
  */
 
-#define IWL_TX_FIFO_BK		0	/* shared */
-#define IWL_TX_FIFO_BE		1
-#define IWL_TX_FIFO_VI		2	/* shared */
-#define IWL_TX_FIFO_VO		3
-#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN	4
-#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN	5
-/* re-uses the VO FIFO, uCode will properly flush/schedule */
-#define IWL_TX_FIFO_AUX		5
-#define IWL_TX_FIFO_UNUSED	-1
-
-#define IWLAGN_CMD_FIFO_NUM	7
-
-/*
- * This queue number is required for proper operation
- * because the ucode will stop/start the scheduler as
- * required.
- */
-#define IWL_IPAN_MCAST_QUEUE	8
-
-static const u8 iwlagn_default_queue_to_tx_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-	IWLAGN_CMD_FIFO_NUM,
-};
-
-static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-	IWL_TX_FIFO_BK_IPAN,
-	IWL_TX_FIFO_BE_IPAN,
-	IWL_TX_FIFO_VI_IPAN,
-	IWL_TX_FIFO_VO_IPAN,
-	IWL_TX_FIFO_BE_IPAN,
-	IWLAGN_CMD_FIFO_NUM,
-	IWL_TX_FIFO_AUX,
-};
-
 static const u8 iwlagn_bss_ac_to_fifo[] = {
 	IWL_TX_FIFO_VO,
 	IWL_TX_FIFO_VI,
@@ -1350,6 +1307,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	else
 		trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
 	trans_cfg.command_names = iwl_dvm_cmd_strings;
+	trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
 
 	WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
 		priv->cfg->base_params->num_of_queues);
@@ -1363,15 +1321,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
 		priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
 		trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
-		trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
-		trans_cfg.n_queue_to_fifo =
-			ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
 	} else {
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-		trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-		trans_cfg.n_queue_to_fifo =
-			ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
 	}
 
 	/* Configure transport layer */
@@ -1460,9 +1412,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-		trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-		trans_cfg.n_queue_to_fifo =
-			ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
 
 		/* Configure transport layer again*/
 		iwl_trans_configure(priv->trans, &trans_cfg);
@@ -1480,9 +1429,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 		atomic_set(&priv->queue_stop_count[i], 0);
 	}
 
-	WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
-						IWLAGN_CMD_FIFO_NUM);
-
 	if (iwl_init_drv(priv))
 		goto out_free_eeprom;
 

+ 27 - 10
drivers/net/wireless/iwlwifi/dvm/scan.c

@@ -396,15 +396,21 @@ static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
 static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
 {
 	struct iwl_rxon_context *ctx;
+	int limits[NUM_IWL_RXON_CTX] = {};
+	int n_active = 0;
+	u16 limit;
+
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
 
 	/*
 	 * If we're associated, we clamp the dwell time 98%
-	 * of the smallest beacon interval (minus 2 * channel
-	 * tune time)
+	 * of the beacon interval (minus 2 * channel tune time).
+	 * If both contexts are active, we must further restrict to
+	 * 1/2 of the smaller of the two intervals, because the
+	 * contexts might beacon in lock-step, leaving each only
+	 * half of the time between beacons.
 	 */
 	for_each_context(priv, ctx) {
-		u16 value;
-
 		switch (ctx->staging.dev_type) {
 		case RXON_DEV_TYPE_P2P:
 			/* no timing constraints */
@@ -424,14 +430,25 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
 			break;
 		}
 
-		value = ctx->beacon_int;
-		if (!value)
-			value = IWL_PASSIVE_DWELL_BASE;
-		value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
-		dwell_time = min(value, dwell_time);
+		limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE;
 	}
 
-	return dwell_time;
+	switch (n_active) {
+	case 0:
+		return dwell_time;
+	case 2:
+		limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+		limit /= 2;
+		dwell_time = min(limit, dwell_time);
+		/* fall through to limit further */
+	case 1:
+		limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+		limit /= n_active;
+		return min(limit, dwell_time);
+	default:
+		WARN_ON_ONCE(1);
+		return dwell_time;
+	}
 }
 
 static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
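
The rewritten iwl_limit_dwell() splits the clamp across the active contexts. A standalone sketch of the arithmetic, assuming a placeholder channel-tune time (the real IWL_CHANNEL_TUNE_TIME value is not visible in this diff) and a single shared beacon interval:

/* Dwell clamp: 98% of the beacon interval minus two channel-tune
 * times, divided by the number of active contexts.
 * CHANNEL_TUNE_TIME is an assumed example value. */
#include <stdio.h>

#define CHANNEL_TUNE_TIME	5	/* assumed, in TU */

static unsigned int limit_dwell(unsigned int dwell, unsigned int beacon_int,
				unsigned int n_active)
{
	unsigned int limit;

	if (!n_active)
		return dwell;

	limit = (beacon_int * 98) / 100 - 2 * CHANNEL_TUNE_TIME;
	limit /= n_active;
	return limit < dwell ? limit : dwell;
}

int main(void)
{
	/* one active context: 98 - 10 = 88; both active: 88 / 2 = 44 */
	printf("%u %u\n", limit_dwell(110, 100, 1), limit_dwell(110, 100, 2));
	return 0;
}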

+ 37 - 0
drivers/net/wireless/iwlwifi/dvm/ucode.c

@@ -226,13 +226,50 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
 	return ret;
 }
 
+static const u8 iwlagn_default_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWL_TX_FIFO_BK_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_UNUSED,
+	IWL_TX_FIFO_AUX,
+};
 
 static int iwl_alive_notify(struct iwl_priv *priv)
 {
+	const u8 *queue_to_txf;
+	u8 n_queues;
 	int ret;
+	int i;
 
 	iwl_trans_fw_alive(priv->trans);
 
+	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
+	    priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
+		n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
+		queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
+	} else {
+		n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+		queue_to_txf = iwlagn_default_queue_to_tx_fifo;
+	}
+
+	for (i = 0; i < n_queues; i++)
+		if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
+			iwl_trans_ac_txq_enable(priv->trans, i,
+						queue_to_txf[i]);
+
 	priv->passive_no_rx = false;
 	priv->transport_queue_stop = 0;
 

+ 4 - 1
drivers/net/wireless/iwlwifi/iwl-debug.c

@@ -61,6 +61,9 @@
  *
  *****************************************************************************/
 
+#define DEBUG
+
+#include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/export.h>
 #include "iwl-debug.h"
@@ -124,7 +127,7 @@ void __iwl_dbg(struct device *dev,
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_have_debug_level(level) &&
 	    (!limit || net_ratelimit()))
-		dev_err(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
+		dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
 			function, &vaf);
 #endif
 	trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
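
A note on the DEBUG define added at the top of this file: dev_dbg() is compiled out unless DEBUG is defined for the translation unit (or CONFIG_DYNAMIC_DEBUG enables the call site), so switching __iwl_dbg() from dev_err() to dev_dbg() needs the define to keep the messages visible. A minimal sketch of the pattern (illustrative, not taken from the patch):

#define DEBUG			/* must precede <linux/device.h> */
#include <linux/device.h>

static void example(struct device *dev)
{
	/* a no-op if neither DEBUG nor dynamic debug is in effect */
	dev_dbg(dev, "debug message\n");
}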

+ 2 - 2
drivers/net/wireless/iwlwifi/iwl-devtrace.h

@@ -176,7 +176,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM iwlwifi_msg
 
-#define MAX_MSG_LEN	100
+#define MAX_MSG_LEN	110
 
 DECLARE_EVENT_CLASS(iwlwifi_msg_event,
 	TP_PROTO(struct va_format *vaf),
@@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
 				       MAX_MSG_LEN, vaf->fmt,
 				       *vaf->va) >= MAX_MSG_LEN);
 	),
-	TP_printk("%s", (char *)__get_dynamic_array(msg))
+	TP_printk("%s", __get_str(msg))
 );
 
 DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,

+ 1 - 0
drivers/net/wireless/iwlwifi/iwl-drv.c

@@ -1013,6 +1013,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
 	.power_level = IWL_POWER_INDEX_1,
 	.bt_ch_announce = true,
 	.auto_agg = true,
+	.wd_disable = true,
 	/* the rest are 0 by default */
 };
 EXPORT_SYMBOL_GPL(iwlwifi_mod_params);

+ 17 - 10
drivers/net/wireless/iwlwifi/iwl-trans.h

@@ -290,16 +290,17 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
  * currently supports
  */
 #define IWL_MAX_HW_QUEUES		32
+#define IWL_INVALID_STATION	255
+#define IWL_MAX_TID_COUNT	8
+#define IWL_FRAME_LIMIT	64
 
 /**
  * struct iwl_trans_config - transport configuration
  *
  * @op_mode: pointer to the upper layer.
- * @queue_to_fifo: queue to FIFO mapping to set up by
- *	default
- * @n_queue_to_fifo: number of queues to set up
  * @cmd_queue: the index of the command queue.
  *	Must be set before start_fw.
+ * @cmd_fifo: the fifo for host commands
  * @no_reclaim_cmds: Some devices erroneously don't set the
  *	SEQ_RX_FRAME bit on some notifications, this is the
  *	list of such notifications to filter. Max length is
@@ -314,10 +315,9 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
  */
 struct iwl_trans_config {
 	struct iwl_op_mode *op_mode;
-	const u8 *queue_to_fifo;
-	u8 n_queue_to_fifo;
 
 	u8 cmd_queue;
+	u8 cmd_fifo;
 	const u8 *no_reclaim_cmds;
 	int n_no_reclaim_cmds;
 
@@ -355,9 +355,9 @@ struct iwl_trans;
  *	Must be atomic
  * @reclaim: free packet until ssn. Returns a list of freed packets.
  *	Must be atomic
- * @txq_enable: setup a tx queue for AMPDU - will be called once the HW is
- *	ready and a successful ADDBA response has been received.
- *	May sleep
+ * @txq_enable: setup a queue. To setup an AC queue, use the
+ *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
+ *	this one. The op_mode must not configure the HCMD queue. May sleep.
  * @txq_disable: de-configure a Tx queue to send AMPDUs
  *	Must be atomic
  * @wait_tx_queue_empty: wait until all tx queues are empty
@@ -497,9 +497,9 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
 {
 	might_sleep();
 
-	trans->ops->fw_alive(trans);
-
 	trans->state = IWL_TRANS_FW_ALIVE;
+
+	trans->ops->fw_alive(trans);
 }
 
 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -593,6 +593,13 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
 				 frame_limit, ssn);
 }
 
+static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
+					   int fifo)
+{
+	iwl_trans_txq_enable(trans, queue, fifo, IWL_INVALID_STATION,
+			     IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
+}
+
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
 {
 	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
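
Taken together with the main.c and ucode.c hunks above, the op_mode side now passes cmd_queue/cmd_fifo at configure time and enables the AC queues itself once the firmware is alive. A hedged sketch of that sequence (the wrapper, config fields and constants are from this patch; the function and its parameters are illustrative):

static void example_start(struct iwl_trans *trans, struct iwl_op_mode *op_mode,
			  const u8 *queue_to_txf, int n_queues)
{
	struct iwl_trans_config cfg = {
		.op_mode   = op_mode,
		.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM,
		.cmd_fifo  = IWLAGN_CMD_FIFO_NUM,	/* replaces queue_to_fifo */
	};
	int q;

	iwl_trans_configure(trans, &cfg);

	/* ... load and boot the firmware ... */

	iwl_trans_fw_alive(trans);	/* transport enables the command queue itself */

	/* data/AC queues are now set up by the op_mode via the wrapper */
	for (q = 0; q < n_queues; q++)
		if (queue_to_txf[q] != IWL_TX_FIFO_UNUSED)
			iwl_trans_ac_txq_enable(trans, q, queue_to_txf[q]);
}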

+ 1 - 2
drivers/net/wireless/iwlwifi/pcie/internal.h

@@ -269,10 +269,9 @@ struct iwl_trans_pcie {
 	wait_queue_head_t ucode_write_waitq;
 	unsigned long status;
 	u8 cmd_queue;
+	u8 cmd_fifo;
 	u8 n_no_reclaim_cmds;
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
-	u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
-	u8 n_q_to_fifo;
 
 	bool rx_buf_size_8k;
 	u32 rx_page_order;

+ 0 - 3
drivers/net/wireless/iwlwifi/pcie/rx.c

@@ -879,9 +879,6 @@ static irqreturn_t iwl_isr(int irq, void *data)
 
 	lockdep_assert_held(&trans_pcie->irq_lock);
 
-	if (!trans)
-		return IRQ_NONE;
-
 	trace_iwlwifi_dev_irq(trans->dev);
 
 	/* Disable (but don't clear!) interrupts here to avoid

+ 12 - 24
drivers/net/wireless/iwlwifi/pcie/trans.c

@@ -1059,7 +1059,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 a;
-	int i, chan;
+	int chan;
 	u32 reg_val;
 
 	/* make sure all queue are not stopped/used */
@@ -1091,12 +1091,8 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	 */
 	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
-	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
-		int fifo = trans_pcie->setup_q_to_fifo[i];
-
-		iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
-					  IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0);
-	}
+	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+				trans_pcie->cmd_fifo);
 
 	/* Activate all Tx DMA/FIFO channels */
 	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@@ -1145,7 +1141,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
 			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
 		if (ret < 0)
 			IWL_ERR(trans,
-				"Failing on timeout while stopping DMA channel %d [0x%08x]",
+				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
 				ch,
 				iwl_read_direct32(trans,
 						  FH_TSSR_TX_STATUS_REG));
@@ -1153,7 +1149,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
 	if (!trans_pcie->txq) {
-		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
+		IWL_WARN(trans,
+			 "Stopping tx queues that aren't allocated...\n");
 		return 0;
 	}
 
@@ -1430,7 +1427,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 
 	err = iwl_prepare_card_hw(trans);
 	if (err) {
-		IWL_ERR(trans, "Error while preparing HW: %d", err);
+		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
 		goto err_free_irq;
 	}
 
@@ -1528,6 +1525,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
 	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
 		trans_pcie->n_no_reclaim_cmds = 0;
 	else
@@ -1536,17 +1534,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
 		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
 
-	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
-
-	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
-		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
-
-	/* at least the command queue must be mapped */
-	WARN_ON(!trans_pcie->n_q_to_fifo);
-
-	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
-	       trans_pcie->n_q_to_fifo * sizeof(u8));
-
 	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
 	if (trans_pcie->rx_buf_size_8k)
 		trans_pcie->rx_page_order = get_order(8 * 1024);
@@ -2141,13 +2128,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "pci_request_regions failed\n");
 		goto out_pci_disable_device;
 	}
 
 	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
 	if (!trans_pcie->hw_base) {
-		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
+		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
 		err = -ENODEV;
 		goto out_pci_release_regions;
 	}
@@ -2168,7 +2156,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	err = pci_enable_msi(pdev);
 	if (err)
 		dev_printk(KERN_ERR, &pdev->dev,
-			   "pci_enable_msi failed(0X%x)", err);
+			   "pci_enable_msi failed(0X%x)\n", err);
 
 	trans->dev = &pdev->dev;
 	trans_pcie->irq = pdev->irq;