replace all __volatile__ by volatile

__volatile__ can cause problems with some compilers and volatile is a standard keyword.

Found-by: Reimar Döffinger <Reimar.Doeffinger@gmx.de>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-07-11 03:50:34 +02:00
parent b8a90976da
commit dd0a9b78db
11 changed files with 36 additions and 36 deletions
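
For readers less familiar with GCC extended inline assembly, the following minimal sketch (a hypothetical illustration, not code taken from this commit's diff) shows what the substitution amounts to. __volatile__ is a GNU-specific alias spelling, whereas volatile is the standard C keyword; with GCC both mark the asm statement as one the compiler must not delete, duplicate or reorder.

    /* Hypothetical example -- it only illustrates the spelling change,
     * not any function from this commit. */
    static inline void compiler_barrier(void)
    {
        /* before: __asm__ __volatile__ ("" ::: "memory"); */
        /* after:                                          */
        __asm__ volatile ("" ::: "memory");
    }

With GCC the two spellings behave identically, so this is purely a spelling change: it drops the double-underscore alias that, as the commit message notes, some compilers do not accept.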

@@ -75,7 +75,7 @@ static void ff_acelp_interpolatef_mips(float *out, const float *in,
     float v = 0;
     for (i = 0; i < filter_length;i++) {
-        __asm__ __volatile__ (
+        __asm__ volatile (
             "lwc1 %[in_val_p], 0(%[p_in_p]) \n\t"
             "lwc1 %[fc_val_p], 0(%[p_filter_coeffs_p]) \n\t"
             "lwc1 %[in_val_m], 0(%[p_in_m]) \n\t"
@@ -108,7 +108,7 @@ static void ff_acelp_apply_order_2_transfer_function_mips(float *out, const floa
      * loop is unrolled eight times
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lwc1 $f0, 0(%[mem]) \n\t"
         "blez %[n], ff_acelp_apply_order_2_transfer_function_end%= \n\t"
         "lwc1 $f1, 4(%[mem]) \n\t"

@@ -61,7 +61,7 @@ static void ff_weighted_vector_sumf_mips(
     const float *a_end = in_a + length;
     /* loop unrolled two times */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "blez %[length], ff_weighted_vector_sumf_end%= \n\t"
         "ff_weighted_vector_sumf_madd%=: \n\t"

@@ -70,7 +70,7 @@ void hb_fir_filter_mips(float *out, const float fir_coef[HB_FIR_SIZE + 1],
      * inner loop is entirely unrolled and instructions are scheduled
      * to minimize pipeline stall
      */
-    __asm__ __volatile__(
+    __asm__ volatile(
         "mtc1 $zero, %[output] \n\t"
         "lwc1 $f0, 0(%[p_data]) \n\t"
         "lwc1 $f1, 0(%[fir_coef]) \n\t"

@@ -88,7 +88,7 @@ static void ff_celp_lp_synthesis_filterf_mips(float *out,
     out2 = in[2];
     out3 = in[3];
-    __asm__ __volatile__(
+    __asm__ volatile(
         "lwc1 $f2, 8(%[filter_coeffs]) \n\t"
         "lwc1 $f1, 4(%[filter_coeffs]) \n\t"
         "lwc1 $f0, 0(%[filter_coeffs]) \n\t"
@@ -113,7 +113,7 @@ static void ff_celp_lp_synthesis_filterf_mips(float *out,
     );
     for (i = 5; i <= filter_length; i += 2) {
-        __asm__ __volatile__(
+        __asm__ volatile(
             "lwc1 %[old_out3], -20(%[p_out]) \n\t"
             "lwc1 $f5, 16(%[p_filter_coeffs]) \n\t"
             "addiu %[p_out], -8 \n\t"
@@ -141,7 +141,7 @@ static void ff_celp_lp_synthesis_filterf_mips(float *out,
         FFSWAP(float, old_out0, old_out2);
     }
-    __asm__ __volatile__(
+    __asm__ volatile(
         "nmsub.s %[out3], %[out3], %[a], %[out2] \n\t"
         "nmsub.s %[out2], %[out2], %[a], %[out1] \n\t"
         "nmsub.s %[out3], %[out3], %[b], %[out1] \n\t"
@@ -176,7 +176,7 @@ static void ff_celp_lp_synthesis_filterf_mips(float *out,
     p_out = &out[n];
     out_val = in[n];
     for (i = 1; i <= filter_length; i++) {
-        __asm__ __volatile__(
+        __asm__ volatile(
             "lwc1 %[fc_val], 0(%[p_filter_coeffs]) \n\t"
             "lwc1 %[out_val_i], -4(%[p_out]) \n\t"
             "addiu %[p_filter_coeffs], 4 \n\t"
@@ -219,7 +219,7 @@ static void ff_celp_lp_zero_synthesis_filterf_mips(float *out,
      * outer loop is unrolled eight times so there is less memory access
      * inner loop is unrolled two times
      */
-    __asm__ __volatile__(
+    __asm__ volatile(
         "filt_lp_inner%=: \n\t"
         "lwc1 %[fc_val], 0(%[p_filter_coeffs]) \n\t"
         "lwc1 $f7, 6*4(%[p_in]) \n\t"

@@ -59,7 +59,7 @@ static float ff_dot_productf_mips(const float* a, const float* b,
     float sum;
     const float* a_end = a + length;
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "mtc1 $zero, %[sum] \n\t"
         "blez %[length], ff_dot_productf_end%= \n\t"
         "ff_dot_productf_madd%=: \n\t"

@@ -83,7 +83,7 @@ static void compute_antialias_mips_fixed(MPADecodeContext *s,
     /**
      * instructions are scheduled to minimize pipeline stall.
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lw %[tmp0], -1*4(%[ptr]) \n\t"
         "lw %[tmp1], 0*4(%[ptr]) \n\t"
         "lw %[temp_reg1], 0*4(%[csa]) \n\t"

@@ -82,7 +82,7 @@ static void compute_antialias_mips_float(MPADecodeContext *s,
      * instructions are scheduled to minimize pipeline stall.
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "compute_antialias_float_loop%=: \t\n"
         "lwc1 %[in1], -1*4(%[ptr]) \t\n"
         "lwc1 %[in2], 0(%[csa]) \t\n"

@@ -70,7 +70,7 @@ static av_always_inline void ff_lsp2polyf_mips(const double *lsp, double *f, int
     double tmp, f_j_2, f_j_1, f_j;
     double val = lsp[2*i];
-    __asm__ __volatile__(
+    __asm__ volatile(
         "move %[p_f], %[p_fi] \n\t"
         "add.d %[val], %[val], %[val] \n\t"
         "addiu %[p_fi], 8 \n\t"

@@ -84,7 +84,7 @@ static void ff_mpadsp_apply_window_mips_fixed(int32_t *synth_buf, int32_t *windo
      * use of round_sample function from the original code is eliminated,
      * changed with appropriate assembly instructions.
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "mthi $zero \n\t"
         "mtlo %[temp1] \n\t"
         "lw %[w_asm], 0(%[w]) \n\t"
@@ -175,7 +175,7 @@ static void ff_mpadsp_apply_window_mips_fixed(int32_t *synth_buf, int32_t *windo
        access per two sample */
     for(j = 1; j < 16; j++) {
-        __asm__ __volatile__ (
+        __asm__ volatile (
             "mthi $0, $ac1 \n\t"
             "mtlo $0, $ac1 \n\t"
             "mthi $0 \n\t"
@@ -299,7 +299,7 @@ static void ff_mpadsp_apply_window_mips_fixed(int32_t *synth_buf, int32_t *windo
     p = synth_buf + 32;
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "mthi $0 \n\t"
         "mtlo %[temp1] \n\t"
         "lw %[w_asm], 32*4(%[w]) \n\t"
@@ -372,7 +372,7 @@ static void imdct36_mips_fixed(int *out, int *buf, int *in, int *win)
      * in order to eliminate unnecessary readings and writings in array
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lw %[t1], 17*4(%[in]) \n\t"
         "lw %[t2], 16*4(%[in]) \n\t"
         "lw %[t3], 15*4(%[in]) \n\t"
@@ -469,7 +469,7 @@ static void imdct36_mips_fixed(int *out, int *buf, int *in, int *win)
      * "sub %[t0], %[temp_reg1],%[t0] \n\t"
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lw %[t7], 4*4(%[in1]) \n\t"
         "lw %[t8], 8*4(%[in1]) \n\t"
         "lw %[t6], 16*4(%[in1]) \n\t"
@@ -585,7 +585,7 @@ static void imdct36_mips_fixed(int *out, int *buf, int *in, int *win)
      *
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lw %[t2], 1*4(%[tmp]) \n\t"
         "lw %[t3], 3*4(%[tmp]) \n\t"
         "lw %[t0], 0*4(%[tmp]) \n\t"

@@ -79,7 +79,7 @@ static void ff_mpadsp_apply_window_mips_float(float *synth_buf, float *window,
      * changed with appropriate assembly instructions.
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lwc1 %[sum], 0(%[dither_state]) \t\n"
         "sll %[t_sample], %[incr1], 5 \t\n"
         "sub %[t_sample], %[t_sample], %[incr1] \n\t"
@@ -288,7 +288,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
     /**
      * instructions are scheduled to minimize pipeline stall.
      */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lwc1 %[fTmp1], 0*4(%[tab]) \n\t"
         "lwc1 %[fTmp2], 31*4(%[tab]) \n\t"
         "lwc1 %[fTmp3], 15*4(%[tab]) \n\t"
@@ -350,7 +350,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [tab] "r" (tab)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lwc1 %[fTmp1], 3*4(%[tab]) \n\t"
         "lwc1 %[fTmp2], 28*4(%[tab]) \n\t"
         "lwc1 %[fTmp3], 12*4(%[tab]) \n\t"
@@ -412,7 +412,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [tab] "r" (tab)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "li.s %[fTmp1], 0.54119610014619698439 \n\t"
         "sub.s %[fTmp2], %[val0], %[val3] \n\t"
         "add.s %[val0], %[val0], %[val3] \n\t"
@@ -436,7 +436,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         :
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "sub.s %[fTmp2], %[val16], %[val19] \n\t"
         "add.s %[val16], %[val16], %[val19] \n\t"
         "sub.s %[fTmp3], %[val23], %[val20] \n\t"
@@ -457,7 +457,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [fTmp1] "f" (fTmp1)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lwc1 %[fTmp1], 1*4(%[tab]) \n\t"
         "lwc1 %[fTmp2], 30*4(%[tab]) \n\t"
         "lwc1 %[fTmp3], 14*4(%[tab]) \n\t"
@@ -519,7 +519,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [tab] "r" (tab)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lwc1 %[fTmp1], 2*4(%[tab]) \n\t"
         "lwc1 %[fTmp2], 29*4(%[tab]) \n\t"
         "lwc1 %[fTmp3], 13*4(%[tab]) \n\t"
@@ -581,7 +581,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [tab] "r" (tab)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "li.s %[fTmp1], 1.30656296487637652785 \n\t"
         "sub.s %[fTmp2], %[val1], %[val2] \n\t"
         "add.s %[val1], %[val1], %[val2] \n\t"
@@ -605,7 +605,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         :
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "sub.s %[fTmp2], %[val17], %[val18] \n\t"
         "add.s %[val17], %[val17], %[val18] \n\t"
         "sub.s %[fTmp3], %[val22], %[val21] \n\t"
@@ -626,7 +626,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [fTmp1] "f" (fTmp1)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "li.s %[fTmp1], 0.70710678118654752439 \n\t"
         "sub.s %[fTmp2], %[val0], %[val1] \n\t"
         "add.s %[val0], %[val0], %[val1] \n\t"
@@ -663,7 +663,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [out] "r" (out)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "sub.s %[fTmp2], %[val8], %[val9] \n\t"
         "add.s %[val8], %[val8], %[val9] \n\t"
         "sub.s %[fTmp3], %[val11], %[val10] \n\t"
@@ -704,7 +704,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [fTmp1] "f" (fTmp1), [out] "r" (out)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "sub.s %[fTmp2], %[val16], %[val17] \n\t"
         "add.s %[val16], %[val16], %[val17] \n\t"
         "sub.s %[fTmp3], %[val19], %[val18] \n\t"
@@ -730,7 +730,7 @@ static void ff_dct32_mips_float(float *out, const float *tab)
         : [fTmp1] "f" (fTmp1)
     );
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "sub.s %[fTmp2], %[val24], %[val25] \n\t"
         "add.s %[val24], %[val24], %[val25] \n\t"
         "sub.s %[fTmp3], %[val27], %[val26] \n\t"
@@ -799,7 +799,7 @@ static void imdct36_mips_float(float *out, float *buf, float *in, float *win)
      */
     /* loop 1 and 2 */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lwc1 %[in1], 17*4(%[in]) \t\n"
         "lwc1 %[in2], 16*4(%[in]) \t\n"
         "lwc1 %[in3], 15*4(%[in]) \t\n"
@@ -871,7 +871,7 @@ static void imdct36_mips_float(float *out, float *buf, float *in, float *win)
     );
     /* loop 3 */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "li.s %[c1], 0.5 \t\n"
         "lwc1 %[in1], 8*4(%[in]) \t\n"
         "lwc1 %[in2], 16*4(%[in]) \t\n"
@@ -1002,7 +1002,7 @@ static void imdct36_mips_float(float *out, float *buf, float *in, float *win)
     );
     /* loop 4 */
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "lwc1 %[in1], 2*4(%[tmp]) \t\n"
         "lwc1 %[in2], 0(%[tmp]) \t\n"
         "lwc1 %[in3], 3*4(%[tmp]) \t\n"

@@ -57,7 +57,7 @@ static av_always_inline av_const long int lrintf_mips(float x)
 {
     register int ret_int;
-    __asm__ __volatile__ (
+    __asm__ volatile (
         "cvt.w.s %[x], %[x] \n\t"
         "mfc1 %[ret_int], %[x] \n\t"