
dnn_backend_native_layer_mathunary: add floor support

It can be tested with the model generated by the python script below:

import tensorflow as tf
import os
import numpy as np
import imageio
from tensorflow.python.framework import graph_util
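# note: this script uses TensorFlow 1.x APIs (tf.Session, tf.placeholder, tf.gfile)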
name = 'floor'

pb_file_path = os.getcwd()
if not os.path.exists(pb_file_path+'/{}_savemodel/'.format(name)):
    os.mkdir(pb_file_path+'/{}_savemodel/'.format(name))

with tf.Session(graph=tf.Graph()) as sess:
    in_img = imageio.imread('detection.jpg')
    in_img = in_img.astype(np.float32)
    in_data = in_img[np.newaxis, :]
    input_x = tf.placeholder(tf.float32, shape=[1, None, None, 3], name='dnn_in')
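    # tf.math.floor puts a Floor node into the graph, so the converted model exercises the new floor op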
    y_ = tf.math.floor(input_x*255)/255
    y = tf.identity(y_, name='dnn_out')
    sess.run(tf.global_variables_initializer())
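    # freeze the graph: fold variables into constants so the whole model is stored in a single .pb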
    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['dnn_out'])

    with tf.gfile.FastGFile(pb_file_path+'/{}_savemodel/model.pb'.format(name), mode='wb') as f:
        f.write(constant_graph.SerializeToString())

    print("model.pb generated, please in ffmpeg path use\n \n \
    python tools/python/convert.py {}_savemodel/model.pb --outdir={}_savemodel/ \n \nto generate model.model\n".format(name,name))

    output = sess.run(y, feed_dict={ input_x: in_data})
    imageio.imsave("out.jpg", np.squeeze(output))

    print("To verify, please ffmpeg path use\n \n \
    ./ffmpeg -i detection.jpg -vf format=rgb24,dnn_processing=model={}_savemodel/model.pb:input=dnn_in:output=dnn_out:dnn_backend=tensorflow -f framemd5 {}_savemodel/tensorflow_out.md5\n  \
    or\n \
    ./ffmpeg -i detection.jpg -vf format=rgb24,dnn_processing=model={}_savemodel/model.pb:input=dnn_in:output=dnn_out:dnn_backend=tensorflow {}_savemodel/out_tensorflow.jpg\n \nto generate output result of tensorflow model\n".format(name, name, name, name))

    print("To verify, please ffmpeg path use\n \n \
    ./ffmpeg -i detection.jpg -vf format=rgb24,dnn_processing=model={}_savemodel/model.model:input=dnn_in:output=dnn_out:dnn_backend=native -f framemd5 {}_savemodel/native_out.md5\n  \
    or \n \
    ./ffmpeg -i detection.jpg -vf format=rgb24,dnn_processing=model={}_savemodel/model.model:input=dnn_in:output=dnn_out:dnn_backend=native {}_savemodel/out_native.jpg\n \nto generate output result of native model\n".format(name, name, name, name))
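
With name = 'floor', the printed commands expand to the following (listed here for convenience; paths assume the commands are run from the ffmpeg source directory):

python tools/python/convert.py floor_savemodel/model.pb --outdir=floor_savemodel/
./ffmpeg -i detection.jpg -vf format=rgb24,dnn_processing=model=floor_savemodel/model.pb:input=dnn_in:output=dnn_out:dnn_backend=tensorflow -f framemd5 floor_savemodel/tensorflow_out.md5
./ffmpeg -i detection.jpg -vf format=rgb24,dnn_processing=model=floor_savemodel/model.model:input=dnn_in:output=dnn_out:dnn_backend=native -f framemd5 floor_savemodel/native_out.md5

The two framemd5 files can then be compared to check that the native floor layer matches the tensorflow result.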

Signed-off-by: Mingyu Yin <mingyu.yin@intel.com>
Mingyu Yin 2020-08-06 14:47:16 +08:00 committed by Guo, Yejun
parent 7031a7beae
commit fab00b0ae0
5 changed files with 11 additions and 2 deletions

@@ -134,6 +134,10 @@ int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_oper
         for (int i = 0; i < dims_count; ++i)
             dst[i] = ceil(src[i]);
         return 0;
+    case DMUO_FLOOR:
+        for (int i = 0; i < dims_count; ++i)
+            dst[i] = floor(src[i]);
+        return 0;
     default:
         return -1;
     }

@@ -44,6 +44,7 @@ typedef enum {
     DMUO_ACOSH = 11,
     DMUO_ATANH = 12,
     DMUO_CEIL = 13,
+    DMUO_FLOOR = 14,
     DMUO_COUNT
 } DNNMathUnaryOperation;

@@ -58,6 +58,8 @@ static float get_expected(float f, DNNMathUnaryOperation op)
         return atanh(f);
     case DMUO_CEIL:
         return ceil(f);
+    case DMUO_FLOOR:
+        return floor(f);
     default:
         av_assert0(!"not supported yet");
         return 0.f;
@@ -132,5 +134,7 @@ int main(int agrc, char **argv)
         return 1;
     if (test(DMUO_CEIL))
         return 1;
+    if (test(DMUO_FLOOR))
+        return 1;
     return 0;
 }

@@ -74,7 +74,7 @@ class TFConverter:
         self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4}
         self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4,
                 'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10,
-                'Acosh':11, 'Atanh':12, 'Ceil':13}
+                'Acosh':11, 'Atanh':12, 'Ceil':13, 'Floor':14}
         self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
         self.name_operand_dict = {}

@@ -23,4 +23,4 @@ str = 'FFMPEGDNNNATIVE'
 major = 1
 # increase minor when we don't have to re-convert the model file
-minor = 19
+minor = 20