@@ -46,8 +46,8 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
 
 static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
 {
-        INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
-        queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
+        INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+        queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
                            msecs_to_jiffies(5000));
 }
 
@@ -88,7 +88,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
         struct batadv_tt_common_entry *tt_common_entry;
         struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-        tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
+        tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
         if (tt_common_entry)
                 tt_local_entry = container_of(tt_common_entry,
                                               struct batadv_tt_local_entry,
@@ -102,7 +102,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
         struct batadv_tt_common_entry *tt_common_entry;
         struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-        tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
+        tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
         if (tt_common_entry)
                 tt_global_entry = container_of(tt_common_entry,
                                                struct batadv_tt_global_entry,
@@ -177,8 +177,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
         del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
         /* check for ADD+DEL or DEL+ADD events */
-        spin_lock_bh(&bat_priv->tt_changes_list_lock);
-        list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+        spin_lock_bh(&bat_priv->tt.changes_list_lock);
+        list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                  list) {
                 if (!batadv_compare_eth(entry->change.addr, addr))
                         continue;
@@ -205,15 +205,15 @@ del:
         }
 
         /* track the change in the OGMinterval list */
-        list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
+        list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
 
 unlock:
-        spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+        spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
         if (event_removed)
-                atomic_dec(&bat_priv->tt_local_changes);
+                atomic_dec(&bat_priv->tt.local_changes);
         else
-                atomic_inc(&bat_priv->tt_local_changes);
+                atomic_inc(&bat_priv->tt.local_changes);
 }
 
 int batadv_tt_len(int changes_num)
@@ -223,12 +223,12 @@ int batadv_tt_len(int changes_num)
 
 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
 {
-        if (bat_priv->tt_local_hash)
+        if (bat_priv->tt.local_hash)
                 return 0;
 
-        bat_priv->tt_local_hash = batadv_hash_new(1024);
+        bat_priv->tt.local_hash = batadv_hash_new(1024);
 
-        if (!bat_priv->tt_local_hash)
+        if (!bat_priv->tt.local_hash)
                 return -ENOMEM;
 
         return 0;
@@ -260,7 +260,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
         batadv_dbg(BATADV_DBG_TT, bat_priv,
                    "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
-                   (uint8_t)atomic_read(&bat_priv->ttvn));
+                   (uint8_t)atomic_read(&bat_priv->tt.vn));
 
         memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
         tt_local_entry->common.flags = BATADV_NO_FLAGS;
@@ -279,7 +279,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
          */
         tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
 
-        hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+        hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
                                      batadv_choose_orig,
                                      &tt_local_entry->common,
                                      &tt_local_entry->common.hash_entry);
@@ -350,7 +350,7 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
         primary_if = batadv_primary_if_get_selected(bat_priv);
 
         req_len = min_packet_len;
-        req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+        req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
 
         /* if we have too many changes for one packet don't send any
          * and wait for the tt table request which will be fragmented
@@ -383,10 +383,10 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
         if (new_len > 0)
                 tot_changes = new_len / batadv_tt_len(1);
 
-        spin_lock_bh(&bat_priv->tt_changes_list_lock);
-        atomic_set(&bat_priv->tt_local_changes, 0);
+        spin_lock_bh(&bat_priv->tt.changes_list_lock);
+        atomic_set(&bat_priv->tt.local_changes, 0);
 
-        list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+        list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                  list) {
                 if (count < tot_changes) {
                         memcpy(tt_buff + batadv_tt_len(count),
@@ -396,25 +396,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
                 list_del(&entry->list);
                 kfree(entry);
         }
-        spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+        spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
         /* Keep the buffer for possible tt_request */
-        spin_lock_bh(&bat_priv->tt_buff_lock);
-        kfree(bat_priv->tt_buff);
-        bat_priv->tt_buff_len = 0;
-        bat_priv->tt_buff = NULL;
+        spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+        kfree(bat_priv->tt.last_changeset);
+        bat_priv->tt.last_changeset_len = 0;
+        bat_priv->tt.last_changeset = NULL;
         /* check whether this new OGM has no changes due to size problems */
         if (new_len > 0) {
                 /* if kmalloc() fails we will reply with the full table
                  * instead of providing the diff
                  */
-                bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
-                if (bat_priv->tt_buff) {
-                        memcpy(bat_priv->tt_buff, tt_buff, new_len);
-                        bat_priv->tt_buff_len = new_len;
+                bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+                if (bat_priv->tt.last_changeset) {
+                        memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
+                        bat_priv->tt.last_changeset_len = new_len;
                 }
         }
-        spin_unlock_bh(&bat_priv->tt_buff_lock);
+        spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 
         return count;
 }
@@ -423,7 +423,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
         struct net_device *net_dev = (struct net_device *)seq->private;
         struct batadv_priv *bat_priv = netdev_priv(net_dev);
-        struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
         struct batadv_tt_common_entry *tt_common_entry;
         struct batadv_hard_iface *primary_if;
         struct hlist_node *node;
@@ -448,7 +448,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 
         seq_printf(seq,
                    "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
-                   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
+                   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
 
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
@@ -546,7 +546,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 
 static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
 {
-        struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
         struct hlist_head *head;
         spinlock_t *list_lock; /* protects write access to the hash lists */
         uint32_t i;
@@ -572,10 +572,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
         struct hlist_head *head;
         uint32_t i;
 
-        if (!bat_priv->tt_local_hash)
+        if (!bat_priv->tt.local_hash)
                 return;
 
-        hash = bat_priv->tt_local_hash;
+        hash = bat_priv->tt.local_hash;
 
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
@@ -595,17 +595,17 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 
         batadv_hash_destroy(hash);
 
-        bat_priv->tt_local_hash = NULL;
+        bat_priv->tt.local_hash = NULL;
 }
 
 static int batadv_tt_global_init(struct batadv_priv *bat_priv)
 {
-        if (bat_priv->tt_global_hash)
+        if (bat_priv->tt.global_hash)
                 return 0;
 
-        bat_priv->tt_global_hash = batadv_hash_new(1024);
+        bat_priv->tt.global_hash = batadv_hash_new(1024);
 
-        if (!bat_priv->tt_global_hash)
+        if (!bat_priv->tt.global_hash)
                 return -ENOMEM;
 
         return 0;
@@ -615,16 +615,16 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 {
         struct batadv_tt_change_node *entry, *safe;
 
-        spin_lock_bh(&bat_priv->tt_changes_list_lock);
+        spin_lock_bh(&bat_priv->tt.changes_list_lock);
 
-        list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+        list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                  list) {
                 list_del(&entry->list);
                 kfree(entry);
         }
 
-        atomic_set(&bat_priv->tt_local_changes, 0);
-        spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+        atomic_set(&bat_priv->tt.local_changes, 0);
+        spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 }
 
 /* retrieves the orig_tt_list_entry belonging to orig_node from the
|
@@ -733,7 +733,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
|
|
|
INIT_HLIST_HEAD(&tt_global_entry->orig_list);
|
|
|
spin_lock_init(&tt_global_entry->list_lock);
|
|
|
|
|
|
- hash_added = batadv_hash_add(bat_priv->tt_global_hash,
|
|
|
+ hash_added = batadv_hash_add(bat_priv->tt.global_hash,
|
|
|
batadv_compare_tt,
|
|
|
batadv_choose_orig, common,
|
|
|
&common->hash_entry);
|
|
@@ -812,7 +812,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
|
|
|
{
|
|
|
struct net_device *net_dev = (struct net_device *)seq->private;
|
|
|
struct batadv_priv *bat_priv = netdev_priv(net_dev);
|
|
|
- struct batadv_hashtable *hash = bat_priv->tt_global_hash;
|
|
|
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
|
|
|
struct batadv_tt_common_entry *tt_common_entry;
|
|
|
struct batadv_tt_global_entry *tt_global;
|
|
|
struct batadv_hard_iface *primary_if;
|
|
@@ -913,7 +913,7 @@ batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
|
|
|
"Deleting global tt entry %pM: %s\n",
|
|
|
tt_global_entry->common.addr, message);
|
|
|
|
|
|
- batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
|
|
|
+ batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
|
|
|
batadv_choose_orig, tt_global_entry->common.addr);
|
|
|
batadv_tt_global_entry_free_ref(tt_global_entry);
|
|
|
|
|
@@ -1024,7 +1024,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
|
|
|
struct batadv_tt_global_entry *tt_global;
|
|
|
struct batadv_tt_common_entry *tt_common_entry;
|
|
|
uint32_t i;
|
|
|
- struct batadv_hashtable *hash = bat_priv->tt_global_hash;
|
|
|
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
|
|
|
struct hlist_node *node, *safe;
|
|
|
struct hlist_head *head;
|
|
|
spinlock_t *list_lock; /* protects write access to the hash lists */
|
|
@@ -1088,7 +1088,7 @@ static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
 
 static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
 {
-        struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
         struct hlist_head *head;
         spinlock_t *list_lock; /* protects write access to the hash lists */
         uint32_t i;
@@ -1114,10 +1114,10 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
         struct hlist_head *head;
         uint32_t i;
 
-        if (!bat_priv->tt_global_hash)
+        if (!bat_priv->tt.global_hash)
                 return;
 
-        hash = bat_priv->tt_global_hash;
+        hash = bat_priv->tt.global_hash;
 
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
@@ -1137,7 +1137,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
 
         batadv_hash_destroy(hash);
 
-        bat_priv->tt_global_hash = NULL;
+        bat_priv->tt.global_hash = NULL;
 }
 
 static bool
@@ -1216,7 +1216,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                      struct batadv_orig_node *orig_node)
 {
         uint16_t total = 0, total_one;
-        struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
         struct batadv_tt_common_entry *tt_common;
         struct batadv_tt_global_entry *tt_global;
         struct hlist_node *node;
@@ -1263,7 +1263,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
 {
         uint16_t total = 0, total_one;
-        struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
         struct batadv_tt_common_entry *tt_common;
         struct hlist_node *node;
         struct hlist_head *head;
@@ -1296,14 +1296,14 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
         struct batadv_tt_req_node *node, *safe;
 
-        spin_lock_bh(&bat_priv->tt_req_list_lock);
+        spin_lock_bh(&bat_priv->tt.req_list_lock);
 
-        list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                 list_del(&node->list);
                 kfree(node);
         }
 
-        spin_unlock_bh(&bat_priv->tt_req_list_lock);
+        spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
@@ -1333,15 +1333,15 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 {
         struct batadv_tt_req_node *node, *safe;
 
-        spin_lock_bh(&bat_priv->tt_req_list_lock);
-        list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+        spin_lock_bh(&bat_priv->tt.req_list_lock);
+        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                 if (batadv_has_timed_out(node->issued_at,
                                          BATADV_TT_REQUEST_TIMEOUT)) {
                         list_del(&node->list);
                         kfree(node);
                 }
         }
-        spin_unlock_bh(&bat_priv->tt_req_list_lock);
+        spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 /* returns the pointer to the new tt_req_node struct if no request
@@ -1353,8 +1353,8 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
 {
         struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
 
-        spin_lock_bh(&bat_priv->tt_req_list_lock);
-        list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+        spin_lock_bh(&bat_priv->tt.req_list_lock);
+        list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
                 if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
                     !batadv_has_timed_out(tt_req_node_tmp->issued_at,
                                           BATADV_TT_REQUEST_TIMEOUT))
@@ -1368,9 +1368,9 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
         memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
         tt_req_node->issued_at = jiffies;
 
-        list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+        list_add(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
-        spin_unlock_bh(&bat_priv->tt_req_list_lock);
+        spin_unlock_bh(&bat_priv->tt.req_list_lock);
         return tt_req_node;
 }
 
@@ -1536,9 +1536,9 @@ out:
         if (ret)
                 kfree_skb(skb);
         if (ret && tt_req_node) {
-                spin_lock_bh(&bat_priv->tt_req_list_lock);
+                spin_lock_bh(&bat_priv->tt.req_list_lock);
                 list_del(&tt_req_node->list);
-                spin_unlock_bh(&bat_priv->tt_req_list_lock);
+                spin_unlock_bh(&bat_priv->tt.req_list_lock);
                 kfree(tt_req_node);
         }
         return ret;
@@ -1629,7 +1629,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
 
                 skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                    bat_priv->tt_global_hash,
+                                                    bat_priv->tt.global_hash,
                                                     primary_if,
                                                     batadv_tt_global_valid,
                                                     req_dst_orig_node);
@@ -1700,7 +1700,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                    (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 
-        my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+        my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
         req_ttvn = tt_request->ttvn;
 
         orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
@@ -1719,7 +1719,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
          * is too big send the whole local translation table
          */
         if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
-            !bat_priv->tt_buff)
+            !bat_priv->tt.last_changeset)
                 full_table = true;
         else
                 full_table = false;
@@ -1728,8 +1728,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
          * I'll send only one packet with as much TT entries as I can
          */
         if (!full_table) {
-                spin_lock_bh(&bat_priv->tt_buff_lock);
-                tt_len = bat_priv->tt_buff_len;
+                spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+                tt_len = bat_priv->tt.last_changeset_len;
                 tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
                 len = sizeof(*tt_response) + tt_len;
@@ -1744,16 +1744,16 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                 tt_response->tt_data = htons(tt_tot);
 
                 tt_buff = skb->data + sizeof(*tt_response);
-                memcpy(tt_buff, bat_priv->tt_buff,
-                       bat_priv->tt_buff_len);
-                spin_unlock_bh(&bat_priv->tt_buff_lock);
+                memcpy(tt_buff, bat_priv->tt.last_changeset,
+                       bat_priv->tt.last_changeset_len);
+                spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
         } else {
-                tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+                tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
                 tt_len *= sizeof(struct batadv_tt_change);
-                ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+                ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 
                 skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                    bat_priv->tt_local_hash,
+                                                    bat_priv->tt.local_hash,
                                                     primary_if,
                                                     batadv_tt_local_valid_entry,
                                                     NULL);
@@ -1785,7 +1785,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
         goto out;
 
 unlock:
-        spin_unlock_bh(&bat_priv->tt_buff_lock);
+        spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 out:
         if (orig_node)
                 batadv_orig_node_free_ref(orig_node);
@@ -1938,14 +1938,14 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv,
         }
 
         /* Delete the tt_req_node from pending tt_requests list */
-        spin_lock_bh(&bat_priv->tt_req_list_lock);
-        list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+        spin_lock_bh(&bat_priv->tt.req_list_lock);
+        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                 if (!batadv_compare_eth(node->addr, tt_response->src))
                         continue;
                 list_del(&node->list);
                 kfree(node);
         }
-        spin_unlock_bh(&bat_priv->tt_req_list_lock);
+        spin_unlock_bh(&bat_priv->tt.req_list_lock);
 
         /* Recalculate the CRC for this orig_node and store it */
         orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
@@ -1979,22 +1979,22 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
 {
         struct batadv_tt_roam_node *node, *safe;
 
-        spin_lock_bh(&bat_priv->tt_roam_list_lock);
+        spin_lock_bh(&bat_priv->tt.roam_list_lock);
 
-        list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+        list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
                 list_del(&node->list);
                 kfree(node);
         }
 
-        spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+        spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 {
         struct batadv_tt_roam_node *node, *safe;
 
-        spin_lock_bh(&bat_priv->tt_roam_list_lock);
-        list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+        spin_lock_bh(&bat_priv->tt.roam_list_lock);
+        list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
                 if (!batadv_has_timed_out(node->first_time,
                                           BATADV_ROAMING_MAX_TIME))
                         continue;
@@ -2002,7 +2002,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
                 list_del(&node->list);
                 kfree(node);
         }
-        spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+        spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 /* This function checks whether the client already reached the
@@ -2017,11 +2017,11 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
         struct batadv_tt_roam_node *tt_roam_node;
         bool ret = false;
 
-        spin_lock_bh(&bat_priv->tt_roam_list_lock);
+        spin_lock_bh(&bat_priv->tt.roam_list_lock);
         /* The new tt_req will be issued only if I'm not waiting for a
          * reply from the same orig_node yet
          */
-        list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+        list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
                 if (!batadv_compare_eth(tt_roam_node->addr, client))
                         continue;
 
@@ -2046,12 +2046,12 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
                            BATADV_ROAMING_MAX_COUNT - 1);
                 memcpy(tt_roam_node->addr, client, ETH_ALEN);
 
-                list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+                list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
                 ret = true;
         }
 
 unlock:
-        spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+        spin_unlock_bh(&bat_priv->tt.roam_list_lock);
         return ret;
 }
 
@@ -2115,10 +2115,12 @@ out:
 static void batadv_tt_purge(struct work_struct *work)
 {
         struct delayed_work *delayed_work;
+        struct batadv_priv_tt *priv_tt;
         struct batadv_priv *bat_priv;
 
         delayed_work = container_of(work, struct delayed_work, work);
-        bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
+        priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
+        bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
         batadv_tt_local_purge(bat_priv);
         batadv_tt_global_roam_purge(bat_priv);
@@ -2130,7 +2132,7 @@ static void batadv_tt_purge(struct work_struct *work)
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
-        cancel_delayed_work_sync(&bat_priv->tt_work);
+        cancel_delayed_work_sync(&bat_priv->tt.work);
 
         batadv_tt_local_table_free(bat_priv);
         batadv_tt_global_table_free(bat_priv);
@@ -2138,7 +2140,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
         batadv_tt_changes_list_free(bat_priv);
         batadv_tt_roam_list_free(bat_priv);
 
-        kfree(bat_priv->tt_buff);
+        kfree(bat_priv->tt.last_changeset);
 }
 
 /* This function will enable or disable the specified flags for all the entries
@@ -2182,7 +2184,7 @@ out:
 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 {
-        struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
         struct batadv_tt_common_entry *tt_common;
         struct batadv_tt_local_entry *tt_local;
         struct hlist_node *node, *node_tmp;
@@ -2207,7 +2209,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                            "Deleting local tt entry (%pM): pending\n",
                            tt_common->addr);
 
-                        atomic_dec(&bat_priv->num_local_tt);
+                        atomic_dec(&bat_priv->tt.local_entry_num);
                         hlist_del_rcu(node);
                         tt_local = container_of(tt_common,
                                                 struct batadv_tt_local_entry,
@@ -2225,26 +2227,26 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
 {
         uint16_t changed_num = 0;
 
-        if (atomic_read(&bat_priv->tt_local_changes) < 1)
+        if (atomic_read(&bat_priv->tt.local_changes) < 1)
                 return -ENOENT;
 
-        changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
+        changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
                                           BATADV_TT_CLIENT_NEW, false);
 
         /* all reset entries have to be counted as local entries */
-        atomic_add(changed_num, &bat_priv->num_local_tt);
+        atomic_add(changed_num, &bat_priv->tt.local_entry_num);
         batadv_tt_local_purge_pending_clients(bat_priv);
-        bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
+        bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
 
         /* Increment the TTVN only once per OGM interval */
-        atomic_inc(&bat_priv->ttvn);
+        atomic_inc(&bat_priv->tt.vn);
         batadv_dbg(BATADV_DBG_TT, bat_priv,
                    "Local changes committed, updating to ttvn %u\n",
-                   (uint8_t)atomic_read(&bat_priv->ttvn));
-        bat_priv->tt_poss_change = false;
+                   (uint8_t)atomic_read(&bat_priv->tt.vn));
+        bat_priv->tt.poss_change = false;
 
         /* reset the sending counter */
-        atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+        atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
 
         return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
                                            packet_buff_len, packet_min_len);
@@ -2264,7 +2266,7 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
 
         /* if the changes have been sent often enough */
         if ((tt_num_changes < 0) &&
-            (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+            (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
                 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
                                               packet_min_len, packet_min_len);
                 tt_num_changes = 0;
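
The only change above that is more than a field rename is in batadv_tt_purge(): because the delayed work item now lives inside the embedded TT sub-structure, the work callback has to walk back out through two container_of() steps to reach the enclosing batadv_priv. Below is a minimal, standalone sketch of that pattern; the struct names (work_item, feature_state, device_state) are hypothetical stand-ins, and container_of() is re-defined in simplified form only so the example builds outside the kernel.

#include <stddef.h>
#include <stdio.h>

/* Simplified local stand-in for the kernel's container_of() macro,
 * so this sketch compiles in userspace.
 */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {               /* stand-in for struct delayed_work */
        int pending;
};

struct feature_state {           /* stand-in for struct batadv_priv_tt */
        struct work_item work;   /* work item embedded by value */
        int version;
};

struct device_state {            /* stand-in for struct batadv_priv */
        int ifindex;
        struct feature_state tt; /* feature data embedded by value */
};

/* The callback only receives a pointer to the innermost member,
 * so it recovers the outer structures in two hops, exactly like
 * batadv_tt_purge() does after the split.
 */
static void purge_cb(struct work_item *w)
{
        struct feature_state *tt = container_of(w, struct feature_state, work);
        struct device_state *dev = container_of(tt, struct device_state, tt);

        printf("purging tt version %d on ifindex %d\n", tt->version, dev->ifindex);
}

int main(void)
{
        struct device_state dev = { .ifindex = 3, .tt = { .version = 7 } };

        /* A work scheduler would hand the callback &dev.tt.work, never &dev itself. */
        purge_cb(&dev.tt.work);
        return 0;
}

The design point is that the two container_of() calls cost nothing at runtime (they are compile-time offset arithmetic), so grouping per-feature fields into an embedded sub-struct does not penalize the work callback.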