Changed CPU threads format.
parent fd9039928b
commit 97192f224d
doc/CPU.md
@@ -1,5 +1,7 @@
# CPU backend

**The information in this document applies to version 2.99.5+**

All CPU-related settings are contained in a single `cpu` object in the config file. The CPU backend allows specifying multiple profiles and switching between them without restrictions, by pool request or by config change. The default auto-configuration creates a reasonable minimum of profiles that covers all supported algorithms.

### Example
@@ -9,7 +11,7 @@ The example below demonstrates the main ideas of flexible profile configuration:
* `"rx/wow"` Exact match to algorithm `rx/wow`; defines 4 threads without CPU affinity.
* `"cn"` Default fallback profile for all `cn/*` algorithms; defines 2 threads with CPU affinity. Other fallback profiles are `cn-lite`, `cn-heavy` and `rx`.
* `"cn-lite"` Default fallback profile for all `cn-lite/*` algorithms; defines 2 double threads (intensity 2) with CPU affinity.
* `"cn-pico"` Alternative short object format, since 2.99.5.
* `"cn-pico"` Alternative short object format.
* `"custom-profile"` Custom user-defined profile.
* `"*"` Fallback profile for all algorithms not handled by other profiles.
* `"cn/r"` Exact match; an alias for the profile `custom-profile`.
@@ -24,16 +26,13 @@ The example below demonstrates the main ideas of flexible profile configuration:
    "priority": null,
    "asm": true,
    "rx/wow": [-1, -1, -1, -1],
    "cn": [0, 2],
    "cn": [
        [1, 0],
        [1, 2]
    ],
    "cn-lite": [
        {
            "intensity": 2,
            "affinity": 0
        },
        {
            "intensity": 2,
            "affinity": 2
        }
        [2, 0],
        [2, 2]
    ],
    "cn-pico": {
        "intensity": 2,
@@ -48,8 +47,35 @@ The example below demonstrates the main ideas of flexible profile configuration:
}
```

### Intensity
This option was previously known as `low_power_mode`. Possible values range from 1 to 5. For convenience, when the value is 1 this option can be omitted and the CPU thread config can be specified by a single number (the CPU affinity) instead of an object.
## Threads definition
Threads can be defined in 3 formats.

#### Array format
```json
[
    [1, 0],
    [1, 2],
    [1, -1],
    [2, -1]
]
```
Each line represents one thread. The first element is the intensity (this option was previously known as `low_power_mode`); possible values range from 1 to 5. The second element is the CPU affinity; the special value `-1` means no affinity.

#### Short array format
```json
[-1, -1, -1, -1]
```
Each number represents one thread and specifies its CPU affinity. This is the default format for algorithms with a maximum intensity of 1, which currently means all RandomX variants and cryptonight-gpu.

#### Short object format
```json
{
    "intensity": 2,
    "threads": 8,
    "affinity": -1
}
```
This is an internal format, but it can also be user defined.

## Shared options

@@ -147,31 +147,32 @@ void xmrig::CpuConfig::read(const rapidjson::Value &value)

void xmrig::CpuConfig::generate()
{
    m_shouldSave = true;
    m_shouldSave = true;
    ICpuInfo *cpu = Cpu::info();

    m_threads.disable(Algorithm::CN_0);
    m_threads.move(kCn, Cpu::info()->threads(Algorithm::CN_0));
    m_threads.move(kCn, cpu->threads(Algorithm::CN_0));

# ifdef XMRIG_ALGO_CN_GPU
    m_threads.move(kCnGPU, Cpu::info()->threads(Algorithm::CN_GPU));
    m_threads.move(kCnGPU, cpu->threads(Algorithm::CN_GPU));
# endif

# ifdef XMRIG_ALGO_CN_LITE
    m_threads.disable(Algorithm::CN_LITE_0);
    m_threads.move(kCnLite, Cpu::info()->threads(Algorithm::CN_LITE_1));
    m_threads.move(kCnLite, cpu->threads(Algorithm::CN_LITE_1));
# endif

# ifdef XMRIG_ALGO_CN_HEAVY
    m_threads.move(kCnHeavy, Cpu::info()->threads(Algorithm::CN_HEAVY_0));
    m_threads.move(kCnHeavy, cpu->threads(Algorithm::CN_HEAVY_0));
# endif

# ifdef XMRIG_ALGO_CN_PICO
    m_threads.move(kCnPico, Cpu::info()->threads(Algorithm::CN_PICO_0));
    m_threads.move(kCnPico, cpu->threads(Algorithm::CN_PICO_0));
# endif

# ifdef XMRIG_ALGO_RANDOMX
    m_threads.move(kRx, Cpu::info()->threads(Algorithm::RX_0));
    m_threads.move(kRxWOW, Cpu::info()->threads(Algorithm::RX_WOW));
    m_threads.move(kRx, cpu->threads(Algorithm::RX_0));
    m_threads.move(kRxWOW, cpu->threads(Algorithm::RX_WOW));
# endif
}

@@ -28,25 +28,14 @@
#include "rapidjson/document.h"


namespace xmrig {


static const char *kAffinity = "affinity";
static const char *kIntensity = "intensity";


}


xmrig::CpuThread::CpuThread(const rapidjson::Value &value)
{
    if (value.IsObject()) {
        m_intensity = Json::getInt(value, kIntensity, -1);
        m_affinity = Json::getInt(value, kAffinity, -1);
    if (value.IsArray() && value.Size() >= 2) {
        m_intensity = value[0].GetInt();
        m_affinity = value[1].GetInt();
    }
    else if (value.IsInt()) {
        m_intensity = 1;
        m_intensity = -1;
        m_affinity = value.GetInt();
    }
}

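Not part of this commit: a minimal usage sketch of how the new constructor is expected to handle the thread encodings documented above. The `CpuThread.h` include path and the standalone `main()` are assumptions for illustration only.

```cpp
// Hypothetical usage sketch; the CpuThread.h include path is an assumption.
#include "backend/cpu/CpuThread.h"
#include "rapidjson/document.h"

#include <cstdio>

int main()
{
    // "[2, 0]" -> array format: intensity 2, affinity 0
    // "5"      -> bare number: stored intensity stays -1 (reported as 1), affinity 5
    // The old object form {"intensity": ..., "affinity": ...} is no longer parsed
    // by this constructor (the IsObject() branch is removed above).
    const char *inputs[] = { "[2, 0]", "5" };

    for (const char *json : inputs) {
        rapidjson::Document doc;
        doc.Parse(json);

        // Document derives from Value, so it can be passed directly.
        xmrig::CpuThread thread(doc);

        std::printf("%s -> intensity=%d affinity=%lld valid=%d\n",
                    json,
                    thread.intensity(),
                    static_cast<long long>(thread.affinity()),
                    static_cast<int>(thread.isValid()));
    }

    return 0;
}
```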
@@ -55,17 +44,15 @@ xmrig::CpuThread::CpuThread(const rapidjson::Value &value)
rapidjson::Value xmrig::CpuThread::toJSON(rapidjson::Document &doc) const
{
    using namespace rapidjson;

    if (intensity() > 1) {
        auto &allocator = doc.GetAllocator();

        Value obj(kObjectType);

        obj.AddMember(StringRef(kIntensity), m_intensity, allocator);
        obj.AddMember(StringRef(kAffinity), m_affinity, allocator);

        return obj;
    if (m_intensity == -1) {
        return Value(m_affinity);
    }

    return Value(m_affinity);
    auto &allocator = doc.GetAllocator();

    Value out(kArrayType);
    out.PushBack(m_intensity, allocator);
    out.PushBack(m_affinity, allocator);

    return out;
}

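A matching sketch (again an assumption, not part of the commit) of what the new `toJSON()` is expected to emit for the two cases, using the argument order of the new `CpuThread(affinity, intensity)` constructor; the include path and the `dump()` helper are hypothetical.

```cpp
// Hypothetical serialization sketch; the CpuThread.h include path is an assumption.
#include "backend/cpu/CpuThread.h"
#include "rapidjson/document.h"
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"

#include <cstdio>

static void dump(const xmrig::CpuThread &thread, rapidjson::Document &doc)
{
    using namespace rapidjson;

    // Serialize the Value returned by toJSON() using the document's allocator.
    StringBuffer buffer;
    Writer<StringBuffer> writer(buffer);
    thread.toJSON(doc).Accept(writer);

    std::printf("%s\n", buffer.GetString());
}

int main()
{
    rapidjson::Document doc(rapidjson::kObjectType);

    dump(xmrig::CpuThread(0, -1), doc); // stored intensity -1 -> bare affinity number: 0
    dump(xmrig::CpuThread(2, 2), doc);  // otherwise -> [intensity, affinity] pair: [2,2]

    return 0;
}
```

This mirrors the documentation change above: algorithms with a maximum intensity of 1 keep the short array format, while everything else is written as `[intensity, affinity]` pairs.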
@@ -35,13 +35,14 @@ namespace xmrig {
class CpuThread
{
public:
    inline constexpr CpuThread(int intensity = 1, int64_t affinity = -1) : m_intensity(intensity), m_affinity(affinity) {}
    inline constexpr CpuThread() {}
    inline constexpr CpuThread(int64_t affinity, int intensity) : m_intensity(intensity), m_affinity(affinity) {}

    CpuThread(const rapidjson::Value &value);

    inline bool isEqual(const CpuThread &other) const { return other.m_affinity == m_affinity && other.m_intensity == m_intensity; }
    inline bool isValid() const { return m_intensity >= 1 && m_intensity <= 5; }
    inline int intensity() const { return m_intensity; }
    inline bool isValid() const { return m_intensity == -1 || (m_intensity >= 1 && m_intensity <= 5); }
    inline int intensity() const { return m_intensity == -1 ? 1 : m_intensity; }
    inline int64_t affinity() const { return m_affinity; }

    inline bool operator!=(const CpuThread &other) const { return !isEqual(other); }

@@ -47,7 +47,7 @@ public:
    inline const std::vector<CpuThread> &data() const { return m_data; }
    inline size_t count() const { return m_data.size(); }
    inline void add(CpuThread &&thread) { m_data.push_back(thread); }
    inline void add(int64_t affinity, int intensity = 1) { add(CpuThread(intensity, affinity)); }
    inline void add(int64_t affinity, int intensity) { add(CpuThread(affinity, intensity)); }
    inline void reserve(size_t capacity) { m_data.reserve(capacity); }

    rapidjson::Value toJSON(rapidjson::Document &doc) const;

@@ -250,6 +250,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith
    int L2_associativity = 0;
    size_t extra = 0;
    const size_t scratchpad = algorithm.memory();
    int intensity = algorithm.maxIntensity() == 1 ? -1 : 1;

    if (cache->attr->cache.depth == 3 && isCacheExclusive(cache)) {
        for (size_t i = 0; i < cache->arity; ++i) {
@@ -286,7 +287,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith
    for (hwloc_obj_t core : cores) {
        const std::vector<hwloc_obj_t> units = findByType(core, HWLOC_OBJ_PU);
        for (hwloc_obj_t pu : units) {
            threads.add(pu->os_index);
            threads.add(pu->os_index, intensity);
        }
    }

@@ -307,7 +308,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith
        PUs--;

        allocated_pu = true;
        threads.add(units[pu_id]->os_index);
        threads.add(units[pu_id]->os_index, intensity);

        if (cacheHashes == 0) {
            break;