|
static __hwloc_inline void hwloc_distributev (hwloc_topology_t topology, hwloc_obj_t *roots, unsigned n_roots, hwloc_cpuset_t *cpuset, unsigned n, unsigned until)
|
static __hwloc_inline void hwloc_distribute (hwloc_topology_t topology, hwloc_obj_t root, hwloc_cpuset_t *set, unsigned n, unsigned until)
|
static __hwloc_inline void * hwloc_alloc_membind_policy_nodeset (hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
|
static __hwloc_inline void * hwloc_alloc_membind_policy (hwloc_topology_t topology, size_t len, hwloc_const_cpuset_t set, hwloc_membind_policy_t policy, int flags)
|
Allocate some memory on the memory nodes near the given cpuset `set`.
This is similar to hwloc_alloc_membind_policy_nodeset, but for a given cpuset.
Allocate some memory on the given nodeset `nodeset`.
This is similar to hwloc_alloc_membind_nodeset, except that it is allowed to change the current memory binding policy, thus providing more binding support, at the expense of changing the current state.
Distribute n items over the topology under root .
Distribute n items over the topology under roots .
Array `cpuset` will be filled with `n` cpusets recursively distributed linearly over the topology under `root`, down to depth `until` (which can be INT_MAX to distribute down to the finest level).
This is typically useful when an application wants to distribute n threads over a machine, giving each of them as much private cache as possible and keeping them locally in number order.
The caller may typically want to also call hwloc_bitmap_singlify() before binding a thread so that it does not move at all.
- Note: This function requires the `root` object to have a CPU set.
This is the same as hwloc_distribute, but takes an array of roots instead of just one root.
- Note: This function requires the `roots` objects to have a CPU set.
|
|