@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 	return i;
 }
 
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
-		unsigned char *state, int auto_ack)
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+		unsigned char *state, int auto_ack)
 {
 	return get_buf_states(q, bufnr, state, 1, auto_ack);
 }
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
 			       QDIO_MAX_BUFFERS_PER_Q);
 }
 
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 			  unsigned int input)
 {
 	int cc;
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 	return cc;
 }
 
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
 {
 	if (q->is_input_q)
 		return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
 	return cc;
 }
 
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+static inline void qdio_sync_after_thinint(struct qdio_q *q)
 {
 	if (pci_out_supported(q)) {
 		if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
 		qdio_siga_sync_q(q);
 }
 
-inline void qdio_stop_polling(struct qdio_q *q)
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+			unsigned char *state)
+{
+	qdio_siga_sync_q(q);
+	return get_buf_states(q, bufnr, state, 1, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
 {
 	if (!q->u.in.polling)
 		return;
@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 	count--;
 	if (!count)
 		return;
-
-	/*
-	 * Need to change all PRIMED buffers to NOT_INIT, otherwise
-	 * we're loosing initiative in the thinint code.
-	 */
-	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
-		       count);
 }
 
 static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
 
-	/*
-	 * No siga sync here, as a PCI or we after a thin interrupt
-	 * will sync the queues.
-	 */
-
-	/* need to set count to 1 for non-qebsm */
-	if (!is_qebsm(q))
-		count = 1;
-
-check_next:
 	if (q->first_to_check == stop)
 		goto out;
 
+	/*
+	 * No siga sync here, as a PCI or we after a thin interrupt
+	 * already sync'ed the queues.
+	 */
 	count = get_buf_states(q, q->first_to_check, &state, count, 1);
 	if (!count)
 		goto out;
@@ -490,14 +483,9 @@ check_next:
 	switch (state) {
 	case SLSB_P_INPUT_PRIMED:
 		inbound_primed(q, count);
-		/*
-		 * No siga-sync needed for non-qebsm here, as the inbound queue
-		 * will be synced on the next siga-r, resp.
-		 * tiqdio_is_inbound_q_done will do the siga-sync.
-		 */
 		q->first_to_check = add_buf(q->first_to_check, count);
 		atomic_sub(count, &q->nr_buf_used);
-		goto check_next;
+		break;
 	case SLSB_P_INPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
@@ -516,7 +504,7 @@ out:
 	return q->first_to_check;
 }
 
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
 {
 	int bufnr;
 
@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q)
 
 	if ((bufnr != q->last_move) || q->qdio_error) {
 		q->last_move = bufnr;
-		if (!need_siga_sync(q) && !pci_out_supported(q))
+		if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
 			q->u.in.timestamp = get_usecs();
-
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
 		return 1;
 	} else
 		return 0;
 }
 
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
 {
 	unsigned char state = 0;
 
 	if (!atomic_read(&q->nr_buf_used))
 		return 1;
 
-	/*
-	 * We need that one for synchronization with the adapter, as it
-	 * does a kind of PCI avoidance.
-	 */
 	qdio_siga_sync_q(q);
-
 	get_buf_state(q, q->first_to_check, &state, 0);
+
 	if (state == SLSB_P_INPUT_PRIMED)
-		/* we got something to do */
+		/* more work coming */
 		return 0;
 
-	/* on VM, we don't poll, so the q is always done here */
-	if (need_siga_sync(q) || pci_out_supported(q))
+	if (is_thinint_irq(q->irq_ptr))
+		return 1;
+
+	/* don't poll under z/VM */
+	if (MACHINE_IS_VM)
 		return 1;
 
 	/*
@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q)
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
 			      q->first_to_check);
 		return 1;
-	} else {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
-			      q->first_to_check);
+	} else
 		return 0;
-	}
 }
 
-void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q)
 {
 	int start = q->first_to_kick;
 	int end = q->first_to_check;
@@ -619,7 +601,6 @@ again:
 		goto again;
 }
 
-/* inbound tasklet */
 void qdio_inbound_processing(unsigned long data)
 {
 	struct qdio_q *q = (struct qdio_q *)data;
@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
 
-	/* need to set count to 1 for non-qebsm */
-	if (!is_qebsm(q))
-		count = 1;
-
-check_next:
 	if (q->first_to_check == stop)
 		return q->first_to_check;
 
@@ -661,13 +637,7 @@ check_next:
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
-		/*
-		 * We fetch all buffer states at once. get_buf_states may
-		 * return count < stop. For QEBSM we do not loop.
-		 */
-		if (is_qebsm(q))
-			break;
-		goto check_next;
+		break;
 	case SLSB_P_OUTPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data)
 	tasklet_schedule(&q->tasklet);
 }
 
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 {
 	struct qdio_q *out;
 	int i;
@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
 			tasklet_schedule(&out->tasklet);
 }
 
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+	qdio_sync_after_thinint(q);
+
+	/*
+	 * The interrupt could be caused by a PCI request. Check the
+	 * PCI capable outbound queues.
+	 */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return;
+
+	qdio_kick_handler(q);
+
+	if (!qdio_inbound_q_done(q)) {
+		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
+	}
+
+	qdio_stop_polling(q);
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!qdio_inbound_q_done(q)) {
+		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
+	}
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__tiqdio_inbound_processing(q);
+}
+
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
 				  enum qdio_irq_states state)
 {
@@ -1488,18 +1497,13 @@ out:
  * @count: how many buffers to process
  */
 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
-	    int q_nr, int bufnr, int count)
+	    int q_nr, unsigned int bufnr, unsigned int count)
 {
 	struct qdio_irq *irq_ptr;
 
-	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
-	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
-	    (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
+	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
 		return -EINVAL;
 
-	if (!count)
-		return 0;
-
 	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;