
dnn/native: rename struct ConvolutionalNetwork to NativeModel

Signed-off-by: Ting Fu <ting.fu@intel.com>
Reviewed-by: Guo, Yejun <yejun.guo@intel.com>
Author: Ting Fu <ting.fu@intel.com>, 2020-08-19 21:43:13 +08:00
Committed by: Guo, Yejun
parent b2266961c0
commit a6e830ae7f
3 changed files with 70 additions and 70 deletions
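The rename is mechanical: the native backend stores its model behind the generic DNNModel.model void pointer and every entry point casts it back, so only the struct tag and the local variable name change. A minimal, self-contained sketch of that pattern, with toy stand-in types and a hypothetical describe_native() helper for illustration only (not FFmpeg code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the real FFmpeg types; only the fields needed for
 * the example are kept. */
typedef struct NativeModel {
    int32_t layers_num;
    int32_t operands_num;
} NativeModel;

typedef struct DNNModel {
    void *model;    /* the native backend stores a NativeModel * here */
} DNNModel;

/* Each backend function receives the opaque pointer and casts it back;
 * before this patch the cast target was named ConvolutionalNetwork. */
static void describe_native(void *model)
{
    NativeModel *native_model = (NativeModel *)model;
    printf("layers=%" PRId32 " operands=%" PRId32 "\n",
           native_model->layers_num, native_model->operands_num);
}

int main(void)
{
    NativeModel nm = { .layers_num = 3, .operands_num = 4 };
    DNNModel m = { .model = &nm };
    describe_native(m.model);
    return 0;
}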

libavfilter/dnn/dnn_backend_native.c

@@ -30,10 +30,10 @@
 static DNNReturnType get_input_native(void *model, DNNData *input, const char *input_name)
 {
-    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
-    for (int i = 0; i < network->operands_num; ++i) {
-        DnnOperand *oprd = &network->operands[i];
+    NativeModel *native_model = (NativeModel *)model;
+    for (int i = 0; i < native_model->operands_num; ++i) {
+        DnnOperand *oprd = &native_model->operands[i];
         if (strcmp(oprd->name, input_name) == 0) {
             if (oprd->type != DOT_INPUT)
                 return DNN_ERROR;
@@ -52,15 +52,15 @@ static DNNReturnType get_input_native(void *model, DNNData *input, const char *i
 static DNNReturnType set_input_output_native(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
 {
-    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
+    NativeModel *native_model = (NativeModel *)model;
     DnnOperand *oprd = NULL;
-    if (network->layers_num <= 0 || network->operands_num <= 0)
+    if (native_model->layers_num <= 0 || native_model->operands_num <= 0)
         return DNN_ERROR;
     /* inputs */
-    for (int i = 0; i < network->operands_num; ++i) {
-        oprd = &network->operands[i];
+    for (int i = 0; i < native_model->operands_num; ++i) {
+        oprd = &native_model->operands[i];
         if (strcmp(oprd->name, input_name) == 0) {
             if (oprd->type != DOT_INPUT)
                 return DNN_ERROR;
@@ -88,24 +88,24 @@ static DNNReturnType set_input_output_native(void *model, DNNData *input, const
     input->data = oprd->data;
     /* outputs */
-    network->nb_output = 0;
-    av_freep(&network->output_indexes);
-    network->output_indexes = av_mallocz_array(nb_output, sizeof(*network->output_indexes));
-    if (!network->output_indexes)
+    native_model->nb_output = 0;
+    av_freep(&native_model->output_indexes);
+    native_model->output_indexes = av_mallocz_array(nb_output, sizeof(*native_model->output_indexes));
+    if (!native_model->output_indexes)
         return DNN_ERROR;
     for (uint32_t i = 0; i < nb_output; ++i) {
         const char *output_name = output_names[i];
-        for (int j = 0; j < network->operands_num; ++j) {
-            oprd = &network->operands[j];
+        for (int j = 0; j < native_model->operands_num; ++j) {
+            oprd = &native_model->operands[j];
             if (strcmp(oprd->name, output_name) == 0) {
-                network->output_indexes[network->nb_output++] = j;
+                native_model->output_indexes[native_model->nb_output++] = j;
                 break;
             }
         }
     }
-    if (network->nb_output != nb_output)
+    if (native_model->nb_output != nb_output)
         return DNN_ERROR;
     return DNN_SUCCESS;
@@ -122,7 +122,7 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
     char *buf;
     size_t size;
     int version, header_size, major_version_expected = 1;
-    ConvolutionalNetwork *network = NULL;
+    NativeModel *native_model = NULL;
     AVIOContext *model_file_context;
     int file_size, dnn_size, parsed_size;
     int32_t layer;
@@ -167,29 +167,29 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
     dnn_size += 4;
     header_size = dnn_size;
-    network = av_mallocz(sizeof(ConvolutionalNetwork));
-    if (!network){
+    native_model = av_mallocz(sizeof(NativeModel));
+    if (!native_model){
         goto fail;
     }
-    model->model = (void *)network;
+    model->model = (void *)native_model;
     avio_seek(model_file_context, file_size - 8, SEEK_SET);
-    network->layers_num = (int32_t)avio_rl32(model_file_context);
-    network->operands_num = (int32_t)avio_rl32(model_file_context);
+    native_model->layers_num = (int32_t)avio_rl32(model_file_context);
+    native_model->operands_num = (int32_t)avio_rl32(model_file_context);
     dnn_size += 8;
     avio_seek(model_file_context, header_size, SEEK_SET);
-    network->layers = av_mallocz(network->layers_num * sizeof(Layer));
-    if (!network->layers){
+    native_model->layers = av_mallocz(native_model->layers_num * sizeof(Layer));
+    if (!native_model->layers){
         goto fail;
     }
-    network->operands = av_mallocz(network->operands_num * sizeof(DnnOperand));
-    if (!network->operands){
+    native_model->operands = av_mallocz(native_model->operands_num * sizeof(DnnOperand));
+    if (!native_model->operands){
         goto fail;
     }
-    for (layer = 0; layer < network->layers_num; ++layer){
+    for (layer = 0; layer < native_model->layers_num; ++layer){
         layer_type = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
@@ -197,25 +197,25 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
             goto fail;
         }
-        network->layers[layer].type = layer_type;
-        parsed_size = layer_funcs[layer_type].pf_load(&network->layers[layer], model_file_context, file_size, network->operands_num);
+        native_model->layers[layer].type = layer_type;
+        parsed_size = layer_funcs[layer_type].pf_load(&native_model->layers[layer], model_file_context, file_size, native_model->operands_num);
         if (!parsed_size) {
             goto fail;
         }
         dnn_size += parsed_size;
     }
-    for (int32_t i = 0; i < network->operands_num; ++i){
+    for (int32_t i = 0; i < native_model->operands_num; ++i){
         DnnOperand *oprd;
         int32_t name_len;
         int32_t operand_index = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
-        if (operand_index >= network->operands_num) {
+        if (operand_index >= native_model->operands_num) {
             goto fail;
         }
-        oprd = &network->operands[operand_index];
+        oprd = &native_model->operands[operand_index];
         name_len = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
@@ -257,25 +257,25 @@ fail:
 DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
 {
-    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
+    NativeModel *native_model = (NativeModel *)model->model;
     int32_t layer;
-    uint32_t nb = FFMIN(nb_output, network->nb_output);
-    if (network->layers_num <= 0 || network->operands_num <= 0)
+    uint32_t nb = FFMIN(nb_output, native_model->nb_output);
+    if (native_model->layers_num <= 0 || native_model->operands_num <= 0)
         return DNN_ERROR;
-    if (!network->operands[0].data)
+    if (!native_model->operands[0].data)
         return DNN_ERROR;
-    for (layer = 0; layer < network->layers_num; ++layer){
-        DNNLayerType layer_type = network->layers[layer].type;
-        layer_funcs[layer_type].pf_exec(network->operands,
-                                        network->layers[layer].input_operand_indexes,
-                                        network->layers[layer].output_operand_index,
-                                        network->layers[layer].params);
+    for (layer = 0; layer < native_model->layers_num; ++layer){
+        DNNLayerType layer_type = native_model->layers[layer].type;
+        layer_funcs[layer_type].pf_exec(native_model->operands,
+                                        native_model->layers[layer].input_operand_indexes,
+                                        native_model->layers[layer].output_operand_index,
+                                        native_model->layers[layer].params);
     }
     for (uint32_t i = 0; i < nb; ++i) {
-        DnnOperand *oprd = &network->operands[network->output_indexes[i]];
+        DnnOperand *oprd = &native_model->operands[native_model->output_indexes[i]];
         outputs[i].data = oprd->data;
         outputs[i].height = oprd->dims[1];
         outputs[i].width = oprd->dims[2];
@@ -309,34 +309,34 @@ int32_t calculate_operand_data_length(const DnnOperand* oprd)
 void ff_dnn_free_model_native(DNNModel **model)
 {
-    ConvolutionalNetwork *network;
+    NativeModel *native_model;
     ConvolutionalParams *conv_params;
     int32_t layer;
     if (*model)
     {
         if ((*model)->model) {
-            network = (ConvolutionalNetwork *)(*model)->model;
-            if (network->layers) {
-                for (layer = 0; layer < network->layers_num; ++layer){
-                    if (network->layers[layer].type == DLT_CONV2D){
-                        conv_params = (ConvolutionalParams *)network->layers[layer].params;
+            native_model = (NativeModel *)(*model)->model;
+            if (native_model->layers) {
+                for (layer = 0; layer < native_model->layers_num; ++layer){
+                    if (native_model->layers[layer].type == DLT_CONV2D){
+                        conv_params = (ConvolutionalParams *)native_model->layers[layer].params;
                         av_freep(&conv_params->kernel);
                         av_freep(&conv_params->biases);
                     }
-                    av_freep(&network->layers[layer].params);
+                    av_freep(&native_model->layers[layer].params);
                 }
-                av_freep(&network->layers);
+                av_freep(&native_model->layers);
             }
-            if (network->operands) {
-                for (uint32_t operand = 0; operand < network->operands_num; ++operand)
-                    av_freep(&network->operands[operand].data);
-                av_freep(&network->operands);
+            if (native_model->operands) {
+                for (uint32_t operand = 0; operand < native_model->operands_num; ++operand)
+                    av_freep(&native_model->operands[operand].data);
+                av_freep(&native_model->operands);
             }
-            av_freep(&network->output_indexes);
-            av_freep(&network);
+            av_freep(&native_model->output_indexes);
+            av_freep(&native_model);
         }
         av_freep(model);
     }

libavfilter/dnn/dnn_backend_native.h

@@ -107,14 +107,14 @@ typedef struct InputParams{
 } InputParams;
 // Represents simple feed-forward convolutional network.
-typedef struct ConvolutionalNetwork{
+typedef struct NativeModel{
     Layer *layers;
     int32_t layers_num;
     DnnOperand *operands;
     int32_t operands_num;
     int32_t *output_indexes;
     uint32_t nb_output;
-} ConvolutionalNetwork;
+} NativeModel;
 DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *options);
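For quick reference, the typedef as it reads once this hunk is applied (reconstructed from the new side of the diff above; Layer and DnnOperand are declared elsewhere in the same header):

typedef struct NativeModel{
    Layer *layers;
    int32_t layers_num;
    DnnOperand *operands;
    int32_t operands_num;
    int32_t *output_indexes;
    uint32_t nb_output;
} NativeModel;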

libavfilter/dnn/dnn_backend_tf.c

@@ -487,15 +487,15 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     int64_t transpose_perm_shape[] = {4};
     int64_t input_shape[] = {1, -1, -1, -1};
     DNNReturnType layer_add_res;
-    DNNModel *native_model = NULL;
-    ConvolutionalNetwork *conv_network;
-    native_model = ff_dnn_load_model_native(model_filename, NULL);
-    if (!native_model){
+    DNNModel *model = NULL;
+    NativeModel *native_model;
+    model = ff_dnn_load_model_native(model_filename, NULL);
+    if (!model){
         return DNN_ERROR;
     }
-    conv_network = (ConvolutionalNetwork *)native_model->model;
+    native_model = (NativeModel *)model->model;
     tf_model->graph = TF_NewGraph();
     tf_model->status = TF_NewStatus();
@@ -528,26 +528,26 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     }
     transpose_op = TF_FinishOperation(op_desc, tf_model->status);
-    for (layer = 0; layer < conv_network->layers_num; ++layer){
-        switch (conv_network->layers[layer].type){
+    for (layer = 0; layer < native_model->layers_num; ++layer){
+        switch (native_model->layers[layer].type){
         case DLT_INPUT:
             layer_add_res = DNN_SUCCESS;
             break;
         case DLT_CONV2D:
             layer_add_res = add_conv_layer(tf_model, transpose_op, &op,
-                                           (ConvolutionalParams *)conv_network->layers[layer].params, layer);
+                                           (ConvolutionalParams *)native_model->layers[layer].params, layer);
             break;
         case DLT_DEPTH_TO_SPACE:
             layer_add_res = add_depth_to_space_layer(tf_model, &op,
-                                                     (DepthToSpaceParams *)conv_network->layers[layer].params, layer);
+                                                     (DepthToSpaceParams *)native_model->layers[layer].params, layer);
             break;
         case DLT_MIRROR_PAD:
             layer_add_res = add_pad_layer(tf_model, &op,
-                                          (LayerPadParams *)conv_network->layers[layer].params, layer);
+                                          (LayerPadParams *)native_model->layers[layer].params, layer);
             break;
         case DLT_MAXIMUM:
             layer_add_res = add_maximum_layer(tf_model, &op,
-                                              (DnnLayerMaximumParams *)conv_network->layers[layer].params, layer);
+                                              (DnnLayerMaximumParams *)native_model->layers[layer].params, layer);
             break;
         default:
             CLEANUP_ON_ERROR(tf_model);
@@ -567,7 +567,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
         CLEANUP_ON_ERROR(tf_model);
     }
-    ff_dnn_free_model_native(&native_model);
+    ff_dnn_free_model_native(&model);
     return DNN_SUCCESS;
 }