@@ -52,6 +52,7 @@ struct nfqnl_instance {
 
 	u_int16_t queue_num;			/* number of this queue */
 	u_int8_t copy_mode;
+	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
 /*
  * Following fields are dirtied for each queued packet,
  * keep them in same cache line if possible.
@@ -406,6 +407,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	struct nfqnl_instance *queue;
 	int err = -ENOBUFS;
 	__be32 *packet_id_ptr;
+	int failopen = 0;
 
 	/* rcu_read_lock()ed by nf_hook_slow() */
 	queue = instance_lookup(queuenum);
@@ -431,9 +433,14 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 		goto err_out_free_nskb;
 	}
 	if (queue->queue_total >= queue->queue_maxlen) {
-		queue->queue_dropped++;
-		net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
-				     queue->queue_total);
+		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
+			failopen = 1;
+			err = 0;
+		} else {
+			queue->queue_dropped++;
+			net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
+					     queue->queue_total);
+		}
 		goto err_out_free_nskb;
 	}
 	entry->id = ++queue->id_sequence;
@@ -455,6 +462,8 @@ err_out_free_nskb:
 	kfree_skb(nskb);
 err_out_unlock:
 	spin_unlock_bh(&queue->lock);
+	if (failopen)
+		nf_reinject(entry, NF_ACCEPT);
 err_out:
 	return err;
 }
@@ -858,6 +867,31 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		spin_unlock_bh(&queue->lock);
 	}
 
+	if (nfqa[NFQA_CFG_FLAGS]) {
+		__u32 flags, mask;
+
+		if (!queue) {
+			ret = -ENODEV;
+			goto err_out_unlock;
+		}
+
+		if (!nfqa[NFQA_CFG_MASK]) {
+			/* A mask is needed to specify which flags are being
+			 * changed.
+			 */
+			ret = -EINVAL;
+			goto err_out_unlock;
+		}
+
+		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
+		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
+
+		spin_lock_bh(&queue->lock);
+		queue->flags &= ~mask;
+		queue->flags |= flags & mask;
+		spin_unlock_bh(&queue->lock);
+	}
+
 err_out_unlock:
 	rcu_read_unlock();
 	return ret;
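
For reference, the flag this patch introduces is meant to be set from userspace through the NFQA_CFG_FLAGS/NFQA_CFG_MASK config attributes. Below is a minimal usage sketch (not part of this patch) that assumes a libnetfilter_queue version exposing the nfq_set_queue_flags() helper and a kernel carrying this change; error handling is abbreviated.

/*
 * Sketch only: request fail-open on queue 0 from userspace.
 * Assumptions: libnetfilter_queue with nfq_set_queue_flags(),
 * kernel with NFQA_CFG_F_FAIL_OPEN support.
 */
#include <stdio.h>
#include <arpa/inet.h>
#include <linux/netfilter.h>		/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);

	/* accept everything; a real program would inspect the payload */
	return nfq_set_verdict(qh, ntohl(ph->packet_id), NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h = nfq_open();
	struct nfq_q_handle *qh = nfq_create_queue(h, 0, &cb, NULL);

	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

	/* ask the kernel to ACCEPT instead of drop when the queue is full */
	if (nfq_set_queue_flags(qh, NFQA_CFG_F_FAIL_OPEN, NFQA_CFG_F_FAIL_OPEN) < 0)
		fprintf(stderr, "fail-open not supported by this kernel\n");

	/* ... nfq_fd()/recv()/nfq_handle_packet() loop would follow ... */
	return 0;
}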