@@ -99,7 +99,8 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * capacity in the base structure. Also note that no effort is made
  * to efficiently pack objects across page boundaries.
  */
-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+					gfp_t flags)
 {
 	struct flex_array *ret;
 	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
@@ -115,7 +116,8 @@ struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
 	return ret;
 }
 
-static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+static int fa_element_to_part_nr(struct flex_array *fa,
+					unsigned int element_nr)
 {
 	return element_nr / __elements_per_part(fa->element_size);
 }
@@ -143,14 +145,12 @@ void flex_array_free(struct flex_array *fa)
 	kfree(fa);
 }
 
-static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+static unsigned int index_inside_part(struct flex_array *fa,
+					unsigned int element_nr)
 {
-	return element_nr % __elements_per_part(fa->element_size);
-}
+	unsigned int part_offset;
 
-static int index_inside_part(struct flex_array *fa, int element_nr)
-{
-	int part_offset = fa_index_inside_part(fa, element_nr);
+	part_offset = element_nr % __elements_per_part(fa->element_size);
 	return part_offset * fa->element_size;
 }
 
@@ -185,7 +185,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+			gfp_t flags)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
@@ -217,7 +218,8 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
  *
  * Locking must be provided by the caller.
  */
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+				unsigned int end, gfp_t flags)
 {
 	int start_part;
 	int end_part;
@@ -248,7 +250,7 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-void *flex_array_get(struct flex_array *fa, int element_nr)
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
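
For reference, the functions whose signatures change above are called roughly as follows. This is a minimal, illustrative sketch, not part of the patch: it assumes the flex_array API as declared in <linux/flex_array.h>, and the element type struct foo and the function example_user() are invented for the example.

/*
 * Illustrative sketch only -- not from the patch above.  Assumes the
 * flex_array API from <linux/flex_array.h>; struct foo and
 * example_user() are hypothetical.
 */
#include <linux/flex_array.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct foo {				/* hypothetical element type */
	unsigned long cookie;
};

static int example_user(void)
{
	struct flex_array *fa;
	struct foo tmp = { .cookie = 42 };
	struct foo *p;
	unsigned int idx = 10;		/* element indices are unsigned now */
	int err;

	/* Room for 128 elements; backing parts are allocated lazily. */
	fa = flex_array_alloc(sizeof(struct foo), 128, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	/* Copies element_size bytes from &tmp into the array. */
	err = flex_array_put(fa, idx, &tmp, GFP_KERNEL);
	if (err)
		goto out;

	/* Returns a pointer into the part, or NULL if no part was allocated. */
	p = flex_array_get(fa, idx);
	if (p)
		pr_info("cookie = %lu\n", p->cookie);
out:
	flex_array_free(fa);
	return err;
}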