MQL5算法交易的神经网络-002定义神经网络
定义神经层描述
- 考虑我们的网络有多少层神经元:至少包含输入层,输出层和隐藏层。
- 考虑不同类型神经网络,并确定每层神经元数量。
- 选择不同类型激活函数,同一层神经元使用相同的激活函数。
- 选择不同类型优化器。
- 确定Normalization样本大小和Dropout神经元的概率。
最终一个神经层描述定义如下
//+------------------------------------------------------------------+
//| Description of a single neural layer: its type, geometry,        |
//| activation and optimization settings. An array of these          |
//| descriptions is passed to CNet::Create to build the network.     |
//+------------------------------------------------------------------+
class CLayerDescription : public CObject
  {
public:
                     CLayerDescription(void);
                    ~CLayerDescription(void) {};
   //--- layer geometry
   int               type;              // Type of neural layer
   int               count;             // Number of neurons in the layer
   int               window;            // Source data window size
   int               window_out;        // Results window size
   int               step;              // Input data window step
   int               layers;            // Number of neural layers
   int               batch;             // Weight-matrix update packet (batch) size
   //--- training configuration
   ENUM_ACTIVATION_FUNCTION activation; // Activation function type
   VECTOR            activation_params; // Vector of activation function parameters
                                        // (fix: was `VECTOR activation_params[2]` — an
                                        // array of 2 vectors — which cannot receive the
                                        // `activation_params = VECTOR::Ones(2)` assignment
                                        // performed in the constructor)
   ENUM_OPTIMIZATION optimization;      // Weight matrix optimization type
   TYPE              probability;       // Masking probability, Dropout layers only
  };
//+------------------------------------------------------------------+
//| Default constructor: a fully connected base layer of 100 neurons |
//| with tanh activation and Adam optimization.                      |
//| Fixes: window_out was left uninitialized (indeterminate value);  |
//| the initializer list now follows member declaration order, since |
//| members are always initialized in declaration order regardless   |
//| of how the list is written.                                      |
//+------------------------------------------------------------------+
CLayerDescription::CLayerDescription(void) : type(defNeuronBase),
                                             count(100),
                                             window(100),
                                             window_out(0),   // fix: previously uninitialized
                                             step(100),
                                             layers(1),
                                             batch(100),
                                             activation(AF_TANH),
                                             optimization(Adam),
                                             probability(0.1)
  {
//--- two activation parameters by default: first = 1, second = 0
   activation_params = VECTOR::Ones(2);
   activation_params[1] = 0;
  }
定义神经网络
- 创建Create方法创建神经网络。
- 创建Save 和Load方法保存和加载神经网络。
- 创建m_cLayers属性存储神经层。
- 创建FeedForward,Backpropagation,UpdateWeights分别负责前向传播,反向传播和更新权重。
- 创建SetLearningRates方法设置学习率,定义m_dLearningRate 和 m_adBeta属性保存学习率和衰减系数。
- 创建GetResults方法获取最后一次前向传播的结果。
- 创建GetRecentAverageLoss 方法获取损失函数结果,定义m_dNNLoss 属性保存损失函数的值。
- 创建LossFunction方法获取损失函数类型,定义m_eLossFunction属性保存损失函数类型。
- 创建CMyOpenCL对象m_cOpenCL,使用OpenCL技术利用GPU并行计算。
- 定义m_bOpenCL属性决定是否启用OpenCL。
- 创建InitOpenCL和 UseOpenCL方法初始化和管理OpenCL。
- 创建CPositionEncoder对象m_cPositionEncoder,用于对时间序列数据进行位置编码(记录序列中各元素的位置信息)。
最终神经网络定义如下
//+------------------------------------------------------------------+
//| Neural network model. Holds the stack of neural layers and       |
//| implements the training cycle: feed-forward pass, error          |
//| backpropagation and weight updates. Optionally offloads          |
//| computation to the GPU via OpenCL and applies positional         |
//| encoding to time-series inputs.                                  |
//+------------------------------------------------------------------+
class CNet : public CObject
{
protected:
bool m_bTrainMode;                     // true while the model is being trained
CArrayLayers* m_cLayers;               // container of the network's neural layers
CMyOpenCL* m_cOpenCL;                  // OpenCL context for GPU parallel computation
bool m_bOpenCL;                        // whether OpenCL acceleration is enabled
TYPE m_dNNLoss;                        // smoothed value of the loss function
int m_iLossSmoothFactor;               // averaging period for loss smoothing
CPositionEncoder* m_cPositionEncoder;  // positional encoder for time-series inputs
bool m_bPositionEncoder;               // whether positional encoding is enabled
ENUM_LOSS_FUNCTION m_eLossFunction;    // selected loss function type
VECTOR m_adLambda;                     // L1/L2 regularization coefficients
TYPE m_dLearningRate;                  // learning rate
VECTOR m_adBeta;                       // decay coefficients (beta1, beta2) for the optimizer
public:
CNet(void);
~CNet(void);
//--- Methods for creating an object; overloads allow specifying the
//--- learning rate / betas and the loss function with its lambdas
bool Create(CArrayObj *descriptions);
bool Create(CArrayObj *descriptions, TYPE learning_rate,
TYPE beta1, TYPE beta2);
bool Create(CArrayObj *descriptions,
ENUM_LOSS_FUNCTION loss_function, TYPE lambda1, TYPE lambda2);
bool Create(CArrayObj *descriptions, TYPE learning_rate,
TYPE beta1, TYPE beta2,
ENUM_LOSS_FUNCTION loss_function, TYPE lambda1, TYPE lambda2);
//--- Implement work with OpenCL (setter/getter pair plus initialization)
void UseOpenCL(bool value);
bool UseOpenCL(void) const { return(m_bOpenCL); }
bool InitOpenCL(void);
//--- Methods for working with positional coding (setter/getter pair)
void UsePositionEncoder(bool value);
bool UsePositionEncoder(void) const { return(m_bPositionEncoder);}
//--- Implement the main algorithms of the model:
//--- forward pass, backward pass, weight update, result retrieval
bool FeedForward(const CBufferType *inputs);
bool Backpropagation(CBufferType *target);
bool UpdateWeights(uint batch_size = 1);
bool GetResults(CBufferType *&result);
void SetLearningRates(TYPE learning_rate, TYPE beta1 = defBeta1,
TYPE beta2 = defBeta2);
//--- Loss Function Methods: select the loss type with its L1/L2 lambdas,
//--- query the current type/lambdas, and read/tune the smoothed loss value
bool LossFunction(ENUM_LOSS_FUNCTION loss_function,
TYPE lambda1 = defLambdaL1, TYPE lambda2 = defLambdaL2);
ENUM_LOSS_FUNCTION LossFunction(void) const { return(m_eLossFunction); }
ENUM_LOSS_FUNCTION LossFunction(TYPE &lambda1, TYPE &lambda2);
TYPE GetRecentAverageLoss(void) const { return(m_dNNLoss); }
void LossSmoothFactor(int value) { m_iLossSmoothFactor = value;}
int LossSmoothFactor(void) const { return(m_iLossSmoothFactor);}
//--- Model operation mode control (training vs. inference)
bool TrainMode(void) const { return m_bTrainMode; }
void TrainMode(bool mode);
//--- Methods for working with files (save/load a trained model)
virtual bool Save(string file_name = NULL);
virtual bool Save(const int file_handle);
virtual bool Load(string file_name = NULL, bool common = false);
virtual bool Load(const int file_handle);
//--- object identification method
virtual int Type(void) const { return(defNeuronNet); }
//--- Retrieve pointers to internal buffers of a given layer
virtual CBufferType* GetGradient(uint layer) const;
virtual CBufferType* GetWeights(uint layer) const;
virtual CBufferType* GetDeltaWeights(uint layer) const;
};