@@ -419,20 +419,6 @@ struct pktgen_thread {
 #define REMOVE 1
 #define FIND 0
 
-static inline ktime_t ktime_now(void)
-{
-	struct timespec ts;
-	ktime_get_ts(&ts);
-
-	return timespec_to_ktime(ts);
-}
-
-/* This works even if 32 bit because of careful byte order choice */
-static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
-{
-	return cmp1.tv64 < cmp2.tv64;
-}
-
 static const char version[] =
 	"Packet Generator for packet performance testing. "
 	"Version: " VERSION "\n";
@@ -675,7 +661,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\n");
 
 	/* not really stopped, more like last-running-at */
-	stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at;
+	stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
 	idle = pkt_dev->idle_acc;
 	do_div(idle, NSEC_PER_USEC);
 
@@ -2141,12 +2127,12 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 		return;
 	}
 
-	start_time = ktime_now();
+	start_time = ktime_get();
 	if (remaining < 100000) {
 		/* for small delays (<100us), just loop until limit is reached */
 		do {
-			end_time = ktime_now();
-		} while (ktime_lt(end_time, spin_until));
+			end_time = ktime_get();
+		} while (ktime_compare(end_time, spin_until) < 0);
 	} else {
 		/* see do_nanosleep */
 		hrtimer_init_sleeper(&t, current);
@@ -2162,7 +2148,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 			hrtimer_cancel(&t.timer);
 		} while (t.task && pkt_dev->running && !signal_pending(current));
 		__set_current_state(TASK_RUNNING);
-		end_time = ktime_now();
+		end_time = ktime_get();
 	}
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
@@ -2912,8 +2898,7 @@ static void pktgen_run(struct pktgen_thread *t)
 		pktgen_clear_counters(pkt_dev);
 		pkt_dev->running = 1;	/* Cranke yeself! */
 		pkt_dev->skb = NULL;
-		pkt_dev->started_at =
-		pkt_dev->next_tx = ktime_now();
+		pkt_dev->started_at = pkt_dev->next_tx = ktime_get();
 
 		set_pkt_overhead(pkt_dev);
 
@@ -3072,7 +3057,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
 
 	kfree_skb(pkt_dev->skb);
 	pkt_dev->skb = NULL;
-	pkt_dev->stopped_at = ktime_now();
+	pkt_dev->stopped_at = ktime_get();
 	pkt_dev->running = 0;
 
 	show_results(pkt_dev, nr_frags);
@@ -3091,7 +3076,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
 			continue;
 		if (best == NULL)
 			best = pkt_dev;
-		else if (ktime_lt(pkt_dev->next_tx, best->next_tx))
+		else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
 			best = pkt_dev;
 	}
 	if_unlock(t);
@@ -3176,14 +3161,14 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 
 static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
-	ktime_t idle_start = ktime_now();
+	ktime_t idle_start = ktime_get();
 	schedule();
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
 }
 
 static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
-	ktime_t idle_start = ktime_now();
+	ktime_t idle_start = ktime_get();
 
 	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
@@ -3194,7 +3179,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 		else
 			cpu_relax();
 	}
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
 }
 
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
|
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
|
|
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
|
|
@@ -3216,7 +3201,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
|
|
* "never transmit"
|
|
* "never transmit"
|
|
*/
|
|
*/
|
|
if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
|
|
if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
|
|
- pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
|
|
|
|
|
|
+ pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
|
|
|
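
Note on the conversion pattern: every hunk above replaces the driver-local ktime_now()/ktime_lt() helpers (removed in the first hunk) with the core ktime_get()/ktime_compare() APIs, which read the same monotonic clock. A minimal sketch of the equivalence follows; the tx_is_due() helper is hypothetical, used only to illustrate the comparison idiom, and is not part of the patch.

/* Illustration only -- not part of the patch. */
#include <linux/ktime.h>
#include <linux/types.h>

/* Hypothetical helper: has the monotonic clock reached next_tx yet? */
static inline bool tx_is_due(ktime_t next_tx)
{
	/* old idiom: ktime_lt(ktime_now(), next_tx) meant "not yet due"      */
	/* new idiom: ktime_compare(ktime_get(), next_tx) < 0 means the same  */
	return ktime_compare(ktime_get(), next_tx) >= 0;
}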