@@ -36,6 +36,7 @@ struct mpath_node {
 };
 
 static struct mesh_table *mesh_paths;
+static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 /* This lock will have the grow table function as writer and add / delete nodes
  * as readers. When reading the table (i.e. doing lookups) we are well protected
@@ -94,6 +95,34 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return NULL;
 }
 
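+/**
+ * mpp_path_lookup - look up a proxy path in the proxy paths table
+ * @dst: hardware address (ETH_ALEN length) of the proxied destination
+ * @sdata: local subif
+ *
+ * Returns: pointer to the proxy path structure, or NULL if not found
+ *
+ * Locking: must be called within a read rcu section.
+ */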
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath;
+	struct hlist_node *n;
+	struct hlist_head *bucket;
+	struct mesh_table *tbl;
+	struct mpath_node *node;
+
+	tbl = rcu_dereference(mpp_paths);
+
+	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
+	hlist_for_each_entry_rcu(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata &&
+		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+			if (MPATH_EXPIRED(mpath)) {
+				spin_lock_bh(&mpath->state_lock);
+				if (MPATH_EXPIRED(mpath))
+					mpath->flags &= ~MESH_PATH_ACTIVE;
+				spin_unlock_bh(&mpath->state_lock);
+			}
+			return mpath;
+		}
+	}
+	return NULL;
+}
+
+
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
  * @idx: index
@@ -226,6 +255,91 @@ err_path_alloc:
 }
 
 
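+/**
+ * mpp_path_add - add a proxy path to the proxy paths table
+ * @dst: hardware address (ETH_ALEN length) of the proxied destination
+ * @mpp: hardware address of the mesh node proxying @dst
+ * @sdata: local subif
+ *
+ * Returns: 0 on success, or a negative errno on failure
+ */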
+int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath, *new_mpath;
+	struct mpath_node *node, *new_node;
+	struct hlist_head *bucket;
+	struct hlist_node *n;
+	int grow = 0;
+	int err = 0;
+	u32 hash_idx;
+
+
+	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+		/* never add ourselves as neighbours */
+		return -ENOTSUPP;
+
+	if (is_multicast_ether_addr(dst))
+		return -ENOTSUPP;
+
+	err = -ENOMEM;
+	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+	if (!new_mpath)
+		goto err_path_alloc;
+
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	if (!new_node)
+		goto err_node_alloc;
+
+	read_lock(&pathtbl_resize_lock);
+	memcpy(new_mpath->dst, dst, ETH_ALEN);
+	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
+	new_mpath->sdata = sdata;
+	new_mpath->flags = 0;
+	skb_queue_head_init(&new_mpath->frame_queue);
+	new_node->mpath = new_mpath;
+	new_mpath->exp_time = jiffies;
+	spin_lock_init(&new_mpath->state_lock);
+
+	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
+	bucket = &mpp_paths->hash_buckets[hash_idx];
+
+	spin_lock(&mpp_paths->hashwlock[hash_idx]);
+
+	err = -EEXIST;
+	hlist_for_each_entry(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+			goto err_exists;
+	}
+
+	hlist_add_head_rcu(&new_node->list, bucket);
+	if (atomic_inc_return(&mpp_paths->entries) >=
+		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
+		grow = 1;
+
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	if (grow) {
+		struct mesh_table *oldtbl, *newtbl;
+
+		write_lock(&pathtbl_resize_lock);
+		oldtbl = mpp_paths;
+		newtbl = mesh_table_grow(mpp_paths);
+		if (!newtbl) {
+			write_unlock(&pathtbl_resize_lock);
+			return 0;
+		}
+		rcu_assign_pointer(mpp_paths, newtbl);
+		write_unlock(&pathtbl_resize_lock);
+
+		synchronize_rcu();
+		mesh_table_free(oldtbl, false);
+	}
+	return 0;
+
+err_exists:
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	kfree(new_node);
+err_node_alloc:
+	kfree(new_mpath);
+err_path_alloc:
+	return err;
+}
+
+
 /**
  * mesh_plink_broken - deactivates paths and sends perr when a link breaks
  *
@@ -475,11 +589,21 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 int mesh_pathtbl_init(void)
 {
 	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mesh_paths)
+		return -ENOMEM;
 	mesh_paths->free_node = &mesh_path_node_free;
 	mesh_paths->copy_node = &mesh_path_node_copy;
 	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
-	if (!mesh_paths)
+
+	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mpp_paths) {
+		mesh_table_free(mesh_paths, true);
 		return -ENOMEM;
+	}
+	mpp_paths->free_node = &mesh_path_node_free;
+	mpp_paths->copy_node = &mesh_path_node_copy;
+	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;
+
 	return 0;
 }
 
@@ -511,4 +635,5 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 void mesh_pathtbl_unregister(void)
 {
 	mesh_table_free(mesh_paths, true);
+	mesh_table_free(mpp_paths, true);
 }
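
The new table only pays off together with callers on the data path. As a rough sketch (not part of this patch; the helper name mesh_resolve_proxy() and its placement are assumptions for illustration), a transmitter could ask the proxy table whether a destination outside the mesh is reachable through a proxying mesh node and, if so, address the frame to that proxy; mpp_path_add() would be the receive-side counterpart that records such proxy relationships as they are learned.

/* Illustrative only: mesh_resolve_proxy() is not defined by this patch. */
static bool mesh_resolve_proxy(u8 *dst, u8 *proxy,
			       struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mppath;
	bool found = false;

	rcu_read_lock();
	mppath = mpp_path_lookup(dst, sdata);	/* is dst proxied by a mesh node? */
	if (mppath) {
		/* copy under RCU; the entry may be freed after the unlock */
		memcpy(proxy, mppath->mpp, ETH_ALEN);
		found = true;
	}
	rcu_read_unlock();

	return found;
}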