FFmpeg/tests/dnn/dnn-layer-mathunary-test.c

/*
 * Copyright (c) 2020
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <string.h>
#include <math.h>

#include "libavfilter/dnn/dnn_backend_native_layer_mathunary.h"
#include "libavutil/avassert.h"

#define EPS 0.00001
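
/* get_expected() computes the reference value for a single input with the
 * corresponding libm function, so that the native layer's output can be
 * checked element by element (within EPS, with NaN handled separately). */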
static float get_expected(float f, DNNMathUnaryOperation op)
{
    switch (op)
    {
    case DMUO_ABS:
        return (f >= 0) ? f : -f;
    case DMUO_SIN:
        return sin(f);
    case DMUO_COS:
        return cos(f);
    case DMUO_TAN:
        return tan(f);
    case DMUO_ASIN:
        return asin(f);
    case DMUO_ACOS:
        return acos(f);
    case DMUO_ATAN:
        return atan(f);
    case DMUO_SINH:
        return sinh(f);
    case DMUO_COSH:
        return cosh(f);
    case DMUO_TANH:
        return tanh(f);
    case DMUO_ASINH:
        return asinh(f);
    case DMUO_ACOSH:
        return acosh(f);
    case DMUO_ATANH:
        return atanh(f);
    case DMUO_CEIL:
        return ceil(f);
    case DMUO_FLOOR:
        return floor(f);
    case DMUO_ROUND:
        return round(f);
    default:
        av_assert0(!"not supported yet");
        return 0.f;
    }
}
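
/* Run a single unary operation through the native math_unary layer on a
 * small 1x1x3x3 input and compare every output element against the libm
 * reference from get_expected(). Returns 0 on success, 1 on mismatch. */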
static int test(DNNMathUnaryOperation op)
{
    DnnLayerMathUnaryParams params;
    DnnOperand operands[2];
    int32_t input_indexes[1];
    float input[1*1*3*3] = {
        0.1, 0.5, 0.75, -3, 2.5, 2, -2.1, 7.8, 100};
    float *output;

    params.un_op = op;

    operands[0].data = input;
    operands[0].dims[0] = 1;
    operands[0].dims[1] = 1;
    operands[0].dims[2] = 3;
    operands[0].dims[3] = 3;
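    /* operands[1] is the output operand; its data pointer is left NULL so the
     * layer allocates the output buffer itself (freed below with av_freep). */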
    operands[1].data = NULL;

    input_indexes[0] = 0;
    dnn_execute_layer_math_unary(operands, input_indexes, 1, &params, NULL);

    output = operands[1].data;
    for (int i = 0; i < sizeof(input) / sizeof(float); ++i) {
        float expected_output = get_expected(input[i], op);
        int output_nan = isnan(output[i]);
        int expected_nan = isnan(expected_output);
        if ((!output_nan && !expected_nan && fabs(output[i] - expected_output) > EPS) ||
            (output_nan && !expected_nan) || (!output_nan && expected_nan)) {
            printf("at index %d, output: %f, expected_output: %f\n", i, output[i], expected_output);
            av_freep(&output);
            return 1;
        }
    }
    av_freep(&output);
    return 0;
}
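
/* Exercise every operation supported by the native backend; any failure
 * makes the test exit non-zero. (The binary is typically wired into FFmpeg's
 * FATE suite; the exact FATE target name is not assumed here.) */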
int main(int argc, char **argv)
{
    if (test(DMUO_ABS))
        return 1;
    if (test(DMUO_SIN))
        return 1;
    if (test(DMUO_COS))
        return 1;
    if (test(DMUO_TAN))
        return 1;
    if (test(DMUO_ASIN))
        return 1;
    if (test(DMUO_ACOS))
        return 1;
    if (test(DMUO_ATAN))
        return 1;
    if (test(DMUO_SINH))
        return 1;
    if (test(DMUO_COSH))
        return 1;
    if (test(DMUO_TANH))
        return 1;
    if (test(DMUO_ASINH))
        return 1;
    if (test(DMUO_ACOSH))
        return 1;
    if (test(DMUO_ATANH))
        return 1;
    if (test(DMUO_CEIL))
        return 1;
    if (test(DMUO_FLOOR))
        return 1;
    if (test(DMUO_ROUND))
        return 1;
    return 0;
}