Mirror of https://github.com/pjreddie/darknet.git
renamed sigmoid to logistic
commit f047cfff99
parent 655f636a42

This commit renames the SIGMOID activation to LOGISTIC throughout the tree: the CUDA kernels, the CPU implementations and their header, the detection layer, and the parser's default activation string. The function itself, 1/(1 + exp(-x)), is unchanged; only the name moves from "sigmoid" to "logistic".
@@ -4,13 +4,13 @@ extern "C" {
 }
 
 __device__ float linear_activate_kernel(float x){return x;}
-__device__ float sigmoid_activate_kernel(float x){return 1./(1. + exp(-x));}
+__device__ float logistic_activate_kernel(float x){return 1./(1. + exp(-x));}
 __device__ float relu_activate_kernel(float x){return x*(x>0);}
 __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1*x;}
 __device__ float tanh_activate_kernel(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
 
 __device__ float linear_gradient_kernel(float x){return 1;}
-__device__ float sigmoid_gradient_kernel(float x){return (1-x)*x;}
+__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
 __device__ float relu_gradient_kernel(float x){return (x>0);}
 __device__ float ramp_gradient_kernel(float x){return (x>0)+.1;}
 __device__ float tanh_gradient_kernel(float x){return 1-x*x;}
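A note on the gradient helpers above: they are written in terms of the activated output, not the raw input. For y = 1/(1 + exp(-x)), the derivative dy/dx equals y*(1-y), which is exactly (1-x)*x once the caller passes y where the parameter is named x (tanh_gradient_kernel's 1-x*x works the same way on y = tanh(x)). A minimal host-side check of that identity, written as a standalone sketch rather than darknet code:

/* sketch: verify logistic'(x) == y*(1-y) with y = logistic(x) */
#include <math.h>
#include <stdio.h>

static float logistic(float x){return 1.f/(1.f + expf(-x));}
static float logistic_gradient(float y){return (1-y)*y;}  /* expects the output y */

int main()
{
    float x = 0.7f;
    float y = logistic(x);
    float eps = 1e-3f;
    float numeric = (logistic(x+eps) - logistic(x-eps))/(2*eps);
    printf("analytic %f  numeric %f\n", logistic_gradient(y), numeric);
    return 0;
}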
@@ -20,8 +20,8 @@ __device__ float activate_kernel(float x, ACTIVATION a)
     switch(a){
         case LINEAR:
             return linear_activate_kernel(x);
-        case SIGMOID:
-            return sigmoid_activate_kernel(x);
+        case LOGISTIC:
+            return logistic_activate_kernel(x);
         case RELU:
             return relu_activate_kernel(x);
         case RAMP:
@@ -37,8 +37,8 @@ __device__ float gradient_kernel(float x, ACTIVATION a)
     switch(a){
         case LINEAR:
             return linear_gradient_kernel(x);
-        case SIGMOID:
-            return sigmoid_gradient_kernel(x);
+        case LOGISTIC:
+            return logistic_gradient_kernel(x);
         case RELU:
             return relu_gradient_kernel(x);
         case RAMP:
@@ -8,8 +8,8 @@
 char *get_activation_string(ACTIVATION a)
 {
     switch(a){
-        case SIGMOID:
-            return "sigmoid";
+        case LOGISTIC:
+            return "logistic";
         case RELU:
             return "relu";
         case RAMP:
@@ -26,7 +26,7 @@ char *get_activation_string(ACTIVATION a)
 
 ACTIVATION get_activation(char *s)
 {
-    if (strcmp(s, "sigmoid")==0) return SIGMOID;
+    if (strcmp(s, "logistic")==0) return LOGISTIC;
     if (strcmp(s, "relu")==0) return RELU;
     if (strcmp(s, "linear")==0) return LINEAR;
     if (strcmp(s, "ramp")==0) return RAMP;
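Worth noting about the hunk above: the parser now recognizes only "logistic"; the old spelling "sigmoid" is dropped rather than kept as an alias, so existing cfg files that say activation=sigmoid fall through to whatever get_activation does for unrecognized strings. A hypothetical round-trip check, assuming activations.h is on the include path and the program is linked against activations.c:

/* sketch: string <-> enum round trip under the new name */
#include <assert.h>
#include <string.h>
#include "activations.h"

int main()
{
    ACTIVATION a = get_activation("logistic");
    assert(a == LOGISTIC);
    assert(strcmp(get_activation_string(a), "logistic") == 0);
    /* get_activation("sigmoid") would no longer match any branch here */
    return 0;
}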
@@ -40,8 +40,8 @@ float activate(float x, ACTIVATION a)
     switch(a){
         case LINEAR:
             return linear_activate(x);
-        case SIGMOID:
-            return sigmoid_activate(x);
+        case LOGISTIC:
+            return logistic_activate(x);
         case RELU:
             return relu_activate(x);
         case RAMP:
@@ -65,8 +65,8 @@ float gradient(float x, ACTIVATION a)
     switch(a){
         case LINEAR:
             return linear_gradient(x);
-        case SIGMOID:
-            return sigmoid_gradient(x);
+        case LOGISTIC:
+            return logistic_gradient(x);
         case RELU:
             return relu_gradient(x);
         case RAMP:
@@ -3,7 +3,7 @@
 #define ACTIVATIONS_H
 
 typedef enum{
-    SIGMOID, RELU, LINEAR, RAMP, TANH
+    LOGISTIC, RELU, LINEAR, RAMP, TANH
 }ACTIVATION;
 
 ACTIVATION get_activation(char *s);
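Because the rename replaces the enumerator in place, LOGISTIC keeps SIGMOID's position (and therefore its numeric value), and every switch over ACTIVATION that still mentions SIGMOID stops compiling, which is how the remaining call sites in this commit surface. A tiny hypothetical check of the positional claim:

/* sketch: the rename is positional, so the underlying value is unchanged */
#include <assert.h>
#include "activations.h"

int main()
{
    assert(LOGISTIC == 0);  /* first enumerator, as SIGMOID was before */
    return 0;
}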
@@ -19,13 +19,13 @@ void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta);
 #endif
 
 static inline float linear_activate(float x){return x;}
-static inline float sigmoid_activate(float x){return 1./(1. + exp(-x));}
+static inline float logistic_activate(float x){return 1./(1. + exp(-x));}
 static inline float relu_activate(float x){return x*(x>0);}
 static inline float ramp_activate(float x){return x*(x>0)+.1*x;}
 static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
 
 static inline float linear_gradient(float x){return 1;}
-static inline float sigmoid_gradient(float x){return (1-x)*x;}
+static inline float logistic_gradient(float x){return (1-x)*x;}
 static inline float relu_gradient(float x){return (x>0);}
 static inline float ramp_gradient(float x){return (x>0)+.1;}
 static inline float tanh_gradient(float x){return 1-x*x;}
@@ -53,7 +53,7 @@ void forward_detection_layer(const detection_layer layer, float *in, float *trut
             layer.output[out_i++] = scale*in[in_i++];
         }
         softmax_array(layer.output + out_i - layer.classes, layer.classes, layer.output + out_i - layer.classes);
-        activate_array(in+in_i, layer.coords, SIGMOID);
+        activate_array(in+in_i, layer.coords, LOGISTIC);
         for(j = 0; j < layer.coords; ++j){
             layer.output[out_i++] = mask*in[in_i++];
         }
@@ -75,7 +75,7 @@ void backward_detection_layer(const detection_layer layer, float *in, float *del
             delta[in_i++] = scale*layer.delta[out_i++];
         }
 
-        gradient_array(layer.output + out_i, layer.coords, SIGMOID, layer.delta + out_i);
+        gradient_array(layer.output + out_i, layer.coords, LOGISTIC, layer.delta + out_i);
         for(j = 0; j < layer.coords; ++j){
             delta[in_i++] = layer.delta[out_i++];
         }
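In the detection-layer pair above, the forward pass squashes the coords block elementwise and the backward pass scales the incoming delta by the derivative evaluated at the stored outputs. Under the assumption that activate_array and gradient_array are plain elementwise loops over the activate() and gradient() dispatchers (a sketch of the likely shape, not darknet's verbatim code):

/* sketch: elementwise application, assuming the dispatchers in activations.c */
#include "activations.h"

void activate_array_sketch(float *x, int n, ACTIVATION a)
{
    int i;
    for(i = 0; i < n; ++i) x[i] = activate(x[i], a);
}

void gradient_array_sketch(const float *y, int n, ACTIVATION a, float *delta)
{
    int i;
    /* y holds activated outputs, so gradient(y[i], a) is f'(x[i]) */
    for(i = 0; i < n; ++i) delta[i] *= gradient(y[i], a);
}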
@@ -76,7 +76,7 @@ deconvolutional_layer *parse_deconvolutional(list *options, network *net, int co
     int n = option_find_int(options, "filters",1);
     int size = option_find_int(options, "size",1);
     int stride = option_find_int(options, "stride",1);
-    char *activation_s = option_find_str(options, "activation", "sigmoid");
+    char *activation_s = option_find_str(options, "activation", "logistic");
     ACTIVATION activation = get_activation(activation_s);
     if(count == 0){
         learning_rate = option_find_float(options, "learning_rate", .001);
@@ -120,7 +120,7 @@ convolutional_layer *parse_convolutional(list *options, network *net, int count)
     int size = option_find_int(options, "size",1);
     int stride = option_find_int(options, "stride",1);
     int pad = option_find_int(options, "pad",0);
-    char *activation_s = option_find_str(options, "activation", "sigmoid");
+    char *activation_s = option_find_str(options, "activation", "logistic");
     ACTIVATION activation = get_activation(activation_s);
     if(count == 0){
         learning_rate = option_find_float(options, "learning_rate", .001);
@@ -161,7 +161,7 @@ connected_layer *parse_connected(list *options, network *net, int count)
     int input;
     float learning_rate, momentum, decay;
     int output = option_find_int(options, "output",1);
-    char *activation_s = option_find_str(options, "activation", "sigmoid");
+    char *activation_s = option_find_str(options, "activation", "logistic");
     ACTIVATION activation = get_activation(activation_s);
     if(count == 0){
         input = option_find_int(options, "input",1);
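The three parser hunks change only the default passed to option_find_str, so cfg sections that never spell out an activation pick up the new name transparently while resolving to the same function. A hypothetical cfg fragment showing the explicit form after this commit (keys as read by the parser; values are illustrative):

[convolutional]
filters=32
size=3
stride=1
pad=1
# "logistic" is both the new default and the only accepted spelling;
# "sigmoid" would no longer be recognized by get_activation
activation=logistic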