Added JIT compiler for RandomX on ARMv8

SChernykh
2019-09-21 10:10:52 +02:00
parent a4bc548fe5
commit 38f4f4f695
12 changed files with 1918 additions and 59 deletions

View File

@@ -51,6 +51,13 @@ if (WITH_RANDOMX)
)
# cheat because cmake and ccache hate each other
set_property(SOURCE src/crypto/randomx/jit_compiler_x86_static.S PROPERTY LANGUAGE C)
elseif (XMRIG_ARM AND CMAKE_SIZEOF_VOID_P EQUAL 8)
list(APPEND SOURCES_CRYPTO
src/crypto/randomx/jit_compiler_a64_static.S
src/crypto/randomx/jit_compiler_a64.cpp
)
# cheat because cmake and ccache hate each other
set_property(SOURCE src/crypto/randomx/jit_compiler_a64_static.S PROPERTY LANGUAGE C)
endif()
if (CMAKE_CXX_COMPILER_ID MATCHES Clang)

View File

@@ -108,7 +108,7 @@ namespace randomx {
class JitCompilerX86;
using JitCompiler = JitCompilerX86;
#elif defined(__aarch64__)
-#define RANDOMX_HAVE_COMPILER 0
+#define RANDOMX_HAVE_COMPILER 1
class JitCompilerA64;
using JitCompiler = JitCompilerA64;
#else

View File

@@ -82,6 +82,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define HAVE_SETROUNDMODE_IMPL
#endif
+#ifndef HAVE_SETROUNDMODE_IMPL
+static void setRoundMode_(uint32_t mode) {
+fesetround(mode);
+}
+#endif
#ifndef HAVE_ROTR64
uint64_t rotr64(uint64_t a, unsigned int b) {
return (a >> b) | (a << (-b & 63));
@@ -127,12 +133,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef RANDOMX_DEFAULT_FENV
-# ifndef HAVE_SETROUNDMODE_IMPL
-static void setRoundMode_(uint32_t mode) {
-fesetround(mode);
-}
-# endif
void rx_reset_float_state() {
setRoundMode_(FE_TONEAREST);
rx_set_double_precision(); //set precision to 53 bits if needed by the platform

View File

@@ -376,11 +376,138 @@ FORCE_INLINE rx_vec_f128 rx_cvt_packed_int_vec_f128(const void* addr) {
#define RANDOMX_DEFAULT_FENV
void rx_reset_float_state();
#elif defined(__aarch64__)
void rx_set_rounding_mode(uint32_t mode);
#include <stdlib.h>
#include <arm_neon.h>
#include <arm_acle.h>
-#else //end altivec
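// NEON stand-ins for the SSE2 __m128i / __m128d vector types used on x86.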
typedef uint8x16_t rx_vec_i128;
typedef float64x2_t rx_vec_f128;
inline void* rx_aligned_alloc(size_t size, size_t align) {
void* p;
if (posix_memalign(&p, align, size) == 0)
return p;
return nullptr;
}
#define rx_aligned_free(a) free(a)
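// PRFM PLDL1STRM: prefetch for a streaming (read-once) load into L1, the closest AArch64 counterpart of x86 PREFETCHNTA.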
inline void rx_prefetch_nta(void* ptr) {
asm volatile ("prfm pldl1strm, [%0]\n" : : "r" (ptr));
}
FORCE_INLINE rx_vec_f128 rx_load_vec_f128(const double* pd) {
return vld1q_f64((const float64_t*)pd);
}
FORCE_INLINE void rx_store_vec_f128(double* mem_addr, rx_vec_f128 val) {
vst1q_f64((float64_t*)mem_addr, val);
}
FORCE_INLINE rx_vec_f128 rx_swap_vec_f128(rx_vec_f128 a) {
// rotate the two lanes: result = { a[1], a[0] }
return vextq_f64(a, a, 1);
}
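// Lane order matches _mm_set_epi64x(x1, x0) on x86: x0 lands in lane 0, x1 in lane 1.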
FORCE_INLINE rx_vec_f128 rx_set_vec_f128(uint64_t x1, uint64_t x0) {
uint64x2_t temp0 = vdupq_n_u64(x0);
uint64x2_t temp1 = vdupq_n_u64(x1);
return vreinterpretq_f64_u64(vcopyq_laneq_u64(temp0, 1, temp1, 0));
}
FORCE_INLINE rx_vec_f128 rx_set1_vec_f128(uint64_t x) {
return vreinterpretq_f64_u64(vdupq_n_u64(x));
}
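// Double-precision add/sub/mul/div/sqrt map one-to-one onto NEON vector instructions.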
#define rx_add_vec_f128 vaddq_f64
#define rx_sub_vec_f128 vsubq_f64
#define rx_mul_vec_f128 vmulq_f64
#define rx_div_vec_f128 vdivq_f64
#define rx_sqrt_vec_f128 vsqrtq_f64
FORCE_INLINE rx_vec_f128 rx_xor_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
return vreinterpretq_f64_u8(veorq_u8(vreinterpretq_u8_f64(a), vreinterpretq_u8_f64(b)));
}
FORCE_INLINE rx_vec_f128 rx_and_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
return vreinterpretq_f64_u8(vandq_u8(vreinterpretq_u8_f64(a), vreinterpretq_u8_f64(b)));
}
FORCE_INLINE rx_vec_f128 rx_or_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
return vreinterpretq_f64_u8(vorrq_u8(vreinterpretq_u8_f64(a), vreinterpretq_u8_f64(b)));
}
#ifdef __ARM_FEATURE_CRYPTO
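// AESE/AESD XOR the round key into the state before SubBytes, unlike x86 AESENC/AESDEC,
// so run them with a zero key and XOR the real round key in afterwards.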
FORCE_INLINE rx_vec_i128 rx_aesenc_vec_i128(rx_vec_i128 a, rx_vec_i128 key) {
const uint8x16_t zero = { 0 };
return vaesmcq_u8(vaeseq_u8(a, zero)) ^ key;
}
FORCE_INLINE rx_vec_i128 rx_aesdec_vec_i128(rx_vec_i128 a, rx_vec_i128 key) {
const uint8x16_t zero = { 0 };
return vaesimcq_u8(vaesdq_u8(a, zero)) ^ key;
}
#define HAVE_AES
#endif
#define rx_xor_vec_i128 veorq_u8
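// 32-bit lane extracts, equivalent to _mm_cvtsi128_si32 / _mm_extract_epi32 on the x86 path.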
FORCE_INLINE int rx_vec_i128_x(rx_vec_i128 a) {
return vgetq_lane_s32(vreinterpretq_s32_u8(a), 0);
}
FORCE_INLINE int rx_vec_i128_y(rx_vec_i128 a) {
return vgetq_lane_s32(vreinterpretq_s32_u8(a), 1);
}
FORCE_INLINE int rx_vec_i128_z(rx_vec_i128 a) {
return vgetq_lane_s32(vreinterpretq_s32_u8(a), 2);
}
FORCE_INLINE int rx_vec_i128_w(rx_vec_i128 a) {
return vgetq_lane_s32(vreinterpretq_s32_u8(a), 3);
}
FORCE_INLINE rx_vec_i128 rx_set_int_vec_i128(int _I3, int _I2, int _I1, int _I0) {
const int32_t data[4] = { _I0, _I1, _I2, _I3 };
return vreinterpretq_u8_s32(vld1q_s32(data));
}
FORCE_INLINE rx_vec_i128 rx_load_vec_i128(const rx_vec_i128* mem_addr) {
return vld1q_u8((const uint8_t*)mem_addr);
}
FORCE_INLINE void rx_store_vec_i128(rx_vec_i128* mem_addr, rx_vec_i128 val) {
vst1q_u8((uint8_t*)mem_addr, val);
}
FORCE_INLINE rx_vec_f128 rx_cvt_packed_int_vec_f128(const void* addr) {
double lo = unsigned32ToSigned2sCompl(load32((uint8_t*)addr + 0));
double hi = unsigned32ToSigned2sCompl(load32((uint8_t*)addr + 4));
// initialize before the lane inserts so no lane is read uninitialized
rx_vec_f128 x = vdupq_n_f64(0.0);
x = vsetq_lane_f64(lo, x, 0);
x = vsetq_lane_f64(hi, x, 1);
return x;
}
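// aarch64 uses the shared fesetround-based implementation of the rounding-mode helpers.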
#define RANDOMX_DEFAULT_FENV
#else //portable fallback
#include <cstdint>
#include <stdexcept>
@@ -487,7 +614,6 @@ FORCE_INLINE rx_vec_f128 rx_set1_vec_f128(uint64_t x) {
return v;
}
FORCE_INLINE rx_vec_f128 rx_xor_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
rx_vec_f128 x;
x.i.u64[0] = a.i.u64[0] ^ b.i.u64[0];
@@ -578,10 +704,6 @@ FORCE_INLINE rx_vec_f128 rx_cvt_packed_int_vec_f128(const void* addr) {
#define RANDOMX_DEFAULT_FENV
-void rx_reset_float_state();
-void rx_set_rounding_mode(uint32_t mode);
#endif
#ifndef HAVE_AES
@@ -598,6 +720,14 @@ FORCE_INLINE rx_vec_i128 rx_aesdec_vec_i128(rx_vec_i128 v, rx_vec_i128 rkey) {
}
#endif
+#ifdef RANDOMX_DEFAULT_FENV
+void rx_reset_float_state();
+void rx_set_rounding_mode(uint32_t mode);
+#endif
double loadDoublePortable(const void* addr);
uint64_t mulh(uint64_t, uint64_t);
int64_t smulh(int64_t, int64_t);

File diff suppressed because it is too large

View File

@@ -1,5 +1,6 @@
/*
Copyright (c) 2018-2019, tevador <tevador@gmail.com>
Copyright (c) 2019, SChernykh <https://github.com/SChernykh>
All rights reserved.
@@ -32,42 +33,91 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <stdexcept>
#include "crypto/randomx/common.hpp"
#include "crypto/randomx/jit_compiler_a64_static.hpp"
namespace randomx {
class Program;
class ProgramConfiguration;
class SuperscalarProgram;
class Instruction;
class JitCompilerA64;
typedef void(JitCompilerA64::*InstructionGeneratorA64)(Instruction&, uint32_t&);
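// Member-function pointer type for the 256-entry opcode dispatch table (engine[256] below).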
class JitCompilerA64 {
public:
-JitCompilerA64() {
-throw std::runtime_error("ARM64 JIT compiler is not implemented yet.");
-}
-void generateProgram(Program&, ProgramConfiguration&) {
-}
-void generateProgramLight(Program&, ProgramConfiguration&, uint32_t) {
-}
+JitCompilerA64();
+~JitCompilerA64();
+void generateProgram(Program&, ProgramConfiguration&);
+void generateProgramLight(Program&, ProgramConfiguration&, uint32_t);
template<size_t N>
-void generateSuperscalarHash(SuperscalarProgram(&programs)[N], std::vector<uint64_t> &) {
-}
+void generateSuperscalarHash(SuperscalarProgram(&programs)[N], std::vector<uint64_t> &);
-void generateDatasetInitCode() {
-}
+void generateDatasetInitCode() {}
-ProgramFunc* getProgramFunc() {
-return nullptr;
-}
-DatasetInitFunc* getDatasetInitFunc() {
-return nullptr;
-}
-uint8_t* getCode() {
-return nullptr;
-}
-size_t getCodeSize() {
-return 0;
-}
+ProgramFunc* getProgramFunc() { return reinterpret_cast<ProgramFunc*>(code); }
+DatasetInitFunc* getDatasetInitFunc();
+uint8_t* getCode() { return code; }
+size_t getCodeSize();
+static InstructionGeneratorA64 engine[256];
+uint32_t reg_changed_offset[8];
+uint8_t* code;
+uint32_t literalPos;
+uint32_t num32bitLiterals;
+static void emit32(uint32_t val, uint8_t* code, uint32_t& codePos)
+{
+*(uint32_t*)(code + codePos) = val;
+codePos += sizeof(val);
+}
+static void emit64(uint64_t val, uint8_t* code, uint32_t& codePos)
+{
+*(uint64_t*)(code + codePos) = val;
+codePos += sizeof(val);
+}
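// Helpers that synthesize immediate moves/adds and scratchpad address loads for the handlers below.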
void emitMovImmediate(uint32_t dst, uint32_t imm, uint8_t* code, uint32_t& codePos);
void emitAddImmediate(uint32_t dst, uint32_t src, uint32_t imm, uint8_t* code, uint32_t& codePos);
template<uint32_t tmp_reg>
void emitMemLoad(uint32_t dst, uint32_t src, Instruction& instr, uint8_t* code, uint32_t& codePos);
template<uint32_t tmp_reg_fp>
void emitMemLoadFP(uint32_t src, Instruction& instr, uint8_t* code, uint32_t& codePos);
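// One generator per RandomX VM instruction; each appends the AArch64 machine code for its opcode.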
void h_IADD_RS(Instruction&, uint32_t&);
void h_IADD_M(Instruction&, uint32_t&);
void h_ISUB_R(Instruction&, uint32_t&);
void h_ISUB_M(Instruction&, uint32_t&);
void h_IMUL_R(Instruction&, uint32_t&);
void h_IMUL_M(Instruction&, uint32_t&);
void h_IMULH_R(Instruction&, uint32_t&);
void h_IMULH_M(Instruction&, uint32_t&);
void h_ISMULH_R(Instruction&, uint32_t&);
void h_ISMULH_M(Instruction&, uint32_t&);
void h_IMUL_RCP(Instruction&, uint32_t&);
void h_INEG_R(Instruction&, uint32_t&);
void h_IXOR_R(Instruction&, uint32_t&);
void h_IXOR_M(Instruction&, uint32_t&);
void h_IROR_R(Instruction&, uint32_t&);
void h_IROL_R(Instruction&, uint32_t&);
void h_ISWAP_R(Instruction&, uint32_t&);
void h_FSWAP_R(Instruction&, uint32_t&);
void h_FADD_R(Instruction&, uint32_t&);
void h_FADD_M(Instruction&, uint32_t&);
void h_FSUB_R(Instruction&, uint32_t&);
void h_FSUB_M(Instruction&, uint32_t&);
void h_FSCAL_R(Instruction&, uint32_t&);
void h_FMUL_R(Instruction&, uint32_t&);
void h_FDIV_M(Instruction&, uint32_t&);
void h_FSQRT_R(Instruction&, uint32_t&);
void h_CBRANCH(Instruction&, uint32_t&);
void h_CFROUND(Instruction&, uint32_t&);
void h_ISTORE(Instruction&, uint32_t&);
void h_NOP(Instruction&, uint32_t&);
};
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,51 @@
/*
Copyright (c) 2018-2019, tevador <tevador@gmail.com>
Copyright (c) 2019, SChernykh <https://github.com/SChernykh>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <cstdint>
extern "C" {
void randomx_program_aarch64(void* reg, void* mem, void* scratchpad, uint64_t iterations);
void randomx_program_aarch64_main_loop();
void randomx_program_aarch64_vm_instructions();
void randomx_program_aarch64_imul_rcp_literals_end();
void randomx_program_aarch64_vm_instructions_end();
void randomx_program_aarch64_cacheline_align_mask1();
void randomx_program_aarch64_cacheline_align_mask2();
void randomx_program_aarch64_update_spMix1();
void randomx_program_aarch64_vm_instructions_end_light();
void randomx_program_aarch64_light_cacheline_align_mask();
void randomx_program_aarch64_light_dataset_offset();
void randomx_init_dataset_aarch64();
void randomx_init_dataset_aarch64_end();
void randomx_calc_dataset_item_aarch64();
void randomx_calc_dataset_item_aarch64_prefetch();
void randomx_calc_dataset_item_aarch64_mix();
void randomx_calc_dataset_item_aarch64_store_result();
void randomx_calc_dataset_item_aarch64_end();
}

View File

@@ -26,6 +26,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "crypto/randomx/common.hpp"
#include "crypto/randomx/randomx.h"
#include "crypto/randomx/dataset.hpp"
#include "crypto/randomx/vm_interpreted.hpp"
@@ -33,7 +34,13 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "crypto/randomx/vm_compiled.hpp"
#include "crypto/randomx/vm_compiled_light.hpp"
#include "crypto/randomx/blake2/blake2.h"
#if defined(_M_X64) || defined(__x86_64__)
#include "crypto/randomx/jit_compiler_x86_static.hpp"
#elif defined(XMRIG_ARM)
#include "crypto/randomx/jit_compiler_a64_static.hpp"
#endif
#include <cassert>
RandomX_ConfigurationWownero::RandomX_ConfigurationWownero()
@@ -156,19 +163,10 @@ RandomX_ConfigurationBase::RandomX_ConfigurationBase()
#endif
}
+static uint32_t Log2(size_t value) { return (value > 1) ? (Log2(value / 2) + 1) : 0; }
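// Integer log2, used to fill the Log2_* fields that the A64 JIT consumes.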
void RandomX_ConfigurationBase::Apply()
{
-#if defined(_M_X64) || defined(__x86_64__)
-*(uint32_t*)(codeShhPrefetchTweaked + 3) = ArgonMemory * 16 - 1;
-const uint32_t DatasetBaseMask = DatasetBaseSize - RANDOMX_DATASET_ITEM_SIZE;
-*(uint32_t*)(codeReadDatasetTweaked + 7) = DatasetBaseMask;
-*(uint32_t*)(codeReadDatasetTweaked + 23) = DatasetBaseMask;
-*(uint32_t*)(codeReadDatasetLightSshInitTweaked + 59) = DatasetBaseMask;
-#endif
-CacheLineAlignMask_Calculated = (DatasetBaseSize - 1) & ~(RANDOMX_DATASET_ITEM_SIZE - 1);
-DatasetExtraItems_Calculated = DatasetExtraSize / RANDOMX_DATASET_ITEM_SIZE;
ScratchpadL1Mask_Calculated = (ScratchpadL1_Size / sizeof(uint64_t) - 1) * 8;
ScratchpadL1Mask16_Calculated = (ScratchpadL1_Size / sizeof(uint64_t) / 2 - 1) * 16;
ScratchpadL2Mask_Calculated = (ScratchpadL2_Size / sizeof(uint64_t) - 1) * 8;
@@ -176,22 +174,40 @@ void RandomX_ConfigurationBase::Apply()
ScratchpadL3Mask_Calculated = (((ScratchpadL3_Size / sizeof(uint64_t)) - 1) * 8);
ScratchpadL3Mask64_Calculated = ((ScratchpadL3_Size / sizeof(uint64_t)) / 8 - 1) * 64;
-#if defined(_M_X64) || defined(__x86_64__)
-*(uint32_t*)(codePrefetchScratchpadTweaked + 4) = ScratchpadL3Mask64_Calculated;
-*(uint32_t*)(codePrefetchScratchpadTweaked + 18) = ScratchpadL3Mask64_Calculated;
-#endif
+CacheLineAlignMask_Calculated = (DatasetBaseSize - 1) & ~(RANDOMX_DATASET_ITEM_SIZE - 1);
+DatasetExtraItems_Calculated = DatasetExtraSize / RANDOMX_DATASET_ITEM_SIZE;
ConditionMask_Calculated = (1 << JumpBits) - 1;
-constexpr int CEIL_NULL = 0;
-int k = 0;
+#if defined(_M_X64) || defined(__x86_64__)
+*(uint32_t*)(codeShhPrefetchTweaked + 3) = ArgonMemory * 16 - 1;
+const uint32_t DatasetBaseMask = DatasetBaseSize - RANDOMX_DATASET_ITEM_SIZE;
+*(uint32_t*)(codeReadDatasetTweaked + 7) = DatasetBaseMask;
+*(uint32_t*)(codeReadDatasetTweaked + 23) = DatasetBaseMask;
+*(uint32_t*)(codeReadDatasetLightSshInitTweaked + 59) = DatasetBaseMask;
+*(uint32_t*)(codePrefetchScratchpadTweaked + 4) = ScratchpadL3Mask64_Calculated;
+*(uint32_t*)(codePrefetchScratchpadTweaked + 18) = ScratchpadL3Mask64_Calculated;
+#define JIT_HANDLE(x, prev) randomx::JitCompilerX86::engine[k] = &randomx::JitCompilerX86::h_##x
+#elif defined(XMRIG_ARM)
+Log2_ScratchpadL1 = Log2(ScratchpadL1_Size);
+Log2_ScratchpadL2 = Log2(ScratchpadL2_Size);
+Log2_ScratchpadL3 = Log2(ScratchpadL3_Size);
+Log2_DatasetBaseSize = Log2(DatasetBaseSize);
+Log2_CacheSize = Log2((ArgonMemory * randomx::ArgonBlockSize) / randomx::CacheLineSize);
+#define JIT_HANDLE(x, prev) randomx::JitCompilerA64::engine[k] = &randomx::JitCompilerA64::h_##x
+#else
+#define JIT_HANDLE(x, prev)
+#endif
+constexpr int CEIL_NULL = 0;
+int k = 0;
#define INST_HANDLE(x, prev) \
CEIL_##x = CEIL_##prev + RANDOMX_FREQ_##x; \
for (; k < CEIL_##x; ++k) { JIT_HANDLE(x, prev); }
@@ -435,12 +451,12 @@ extern "C" {
assert(inputSize == 0 || input != nullptr);
assert(output != nullptr);
alignas(16) uint64_t tempHash[8];
rx_blake2b(tempHash, sizeof(tempHash), input, inputSize, nullptr, 0);
machine->initScratchpad(&tempHash);
machine->resetRoundingMode();
for (uint32_t chain = 0; chain < RandomX_CurrentConfig.ProgramCount - 1; ++chain) {
machine->run(&tempHash);
rx_blake2b(tempHash, sizeof(tempHash), machine->getRegisterFile(), sizeof(randomx::RegisterFile), nullptr, 0);
}
machine->run(&tempHash);
machine->getFinalResult(output, RANDOMX_HASH_SIZE);

View File

@@ -133,6 +133,14 @@ struct RandomX_ConfigurationBase
uint32_t ConditionMask_Calculated;
#ifdef XMRIG_ARM
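// Log2 sizes of the scratchpad levels and dataset, used by the A64 JIT to encode address masks.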
uint32_t Log2_ScratchpadL1;
uint32_t Log2_ScratchpadL2;
uint32_t Log2_ScratchpadL3;
uint32_t Log2_DatasetBaseSize;
uint32_t Log2_CacheSize;
#endif
int CEIL_IADD_RS;
int CEIL_IADD_M;
int CEIL_ISUB_R;

View File

@@ -50,6 +50,9 @@ namespace randomx {
template<bool softAes>
void CompiledVm<softAes>::execute() {
#ifdef XMRIG_ARM
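// The A64 program picks up the group E masks from the register file, so stage config.eMask there before the call.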
memcpy(reg.f, config.eMask, sizeof(config.eMask));
#endif
compiler.getProgramFunc()(reg, mem, scratchpad, RandomX_CurrentConfig.ProgramIterations);
}

View File

@@ -33,11 +33,9 @@
xmrig::RxVm::RxVm(RxDataset *dataset, uint8_t *scratchpad, bool softAes)
{
# ifndef XMRIG_ARM
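// On ARM builds the AES implementation is fixed at compile time (__ARM_FEATURE_CRYPTO), so the runtime HARD_AES flag stays x86-only.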
if (!softAes) {
m_flags |= RANDOMX_FLAG_HARD_AES;
}
# endif
if (dataset->get()) {
m_flags |= RANDOMX_FLAG_FULL_MEM;