# Copyright (c) 2019
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================

# magic string identifying the native DNN model file format
str = 'FFMPEGDNNNATIVE'

# increase major and reset minor when we have to re-convert the model file
major = 1

# increase minor when we don't have to re-convert the model file
minor = 18
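
# The constants above make up the header that the conversion tool writes at the
# start of a native model file: the magic string followed by the format version
# (major, minor).  The sketch below is illustrative only -- it assumes the magic
# is stored as raw bytes and the version as two little-endian 32-bit integers;
# the authoritative layout is defined by the converter and the native backend
# loader, not by this file.  It only runs when this module is executed directly,
# so importing convert_header is unaffected.
if __name__ == '__main__':
    import struct

    # pack the header: magic bytes, then major/minor (assumed layout)
    packed = str.encode('utf-8') + struct.pack('<2i', major, minor)

    # unpack it again to verify the round trip
    magic = packed[:len(str)].decode('utf-8')
    got_major, got_minor = struct.unpack('<2i', packed[len(str):])
    assert (magic, got_major, got_minor) == (str, major, minor)
    print('header: %s v%d.%d, %d bytes' % (magic, got_major, got_minor, len(packed)))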