TWRP-ify AOSP code

Pull in most TWRP sources
Stub out partition management code
Make it compile -- probably will not boot
Kind of a mess, but we have to start somewhere
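
For reference, the NEON routines in the file below compute, per pixel, roughly
the following fixed-point math (a scalar sketch, not part of the imported code;
the coefficients and biases match the .equ constants in the file, and the
function name here is illustrative):

    #include <stdint.h>

    static uint8_t clamp8(int32_t v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

    /* Scalar equivalent of yvup2rgb565_venum (sketch). */
    void yvup2rgb565_ref(const uint8_t *y, const uint8_t *cr, const uint8_t *cb,
                         uint8_t *rgb565, uint32_t n)
    {
        for (uint32_t i = 0; i < n; i++) {
            /* Q8 fixed point; each bias is -coeff*128 + 128, folding in the
               -128 chroma offset and the rounding term for the >> 8. */
            int32_t r = (256 * y[i] + 359 * cr[i]               - 45824) >> 8;
            int32_t g = (256 * y[i] -  88 * cb[i] - 183 * cr[i] + 34816) >> 8;
            int32_t b = (256 * y[i] + 454 * cb[i]               - 57984) >> 8;
            uint16_t px = ((uint16_t)(clamp8(r) & 0xF8) << 8)   /* R: 5 bits */
                        | ((uint16_t)(clamp8(g) & 0xFC) << 3)   /* G: 6 bits */
                        |  (uint16_t)(clamp8(b) >> 3);          /* B: 5 bits */
            rgb565[2 * i]     = (uint8_t)(px & 0xFF);           /* Blue at LSB */
            rgb565[2 * i + 1] = (uint8_t)(px >> 8);
        }
    }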
diff --git a/libjpegtwrp/asm/armv7/jdcolor-android-armv7.S b/libjpegtwrp/asm/armv7/jdcolor-android-armv7.S
new file mode 100644
index 0000000..95bd4bf
--- /dev/null
+++ b/libjpegtwrp/asm/armv7/jdcolor-android-armv7.S
@@ -0,0 +1,1223 @@
+/*------------------------------------------------------------------------
+* jdcolor-android-armv7.S
+*
+*  Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+*
+*  Redistribution and use in source and binary forms, with or without
+*  modification, are permitted provided that the following conditions are
+*  met:
+*      * Redistributions of source code must retain the above copyright
+*        notice, this list of conditions and the following disclaimer.
+*      * Redistributions in binary form must reproduce the above
+*        copyright notice, this list of conditions and the following
+*        disclaimer in the documentation and/or other materials provided
+*        with the distribution.
+*      * Neither the name of Code Aurora Forum, Inc. nor the names of its
+*        contributors may be used to endorse or promote products derived
+*        from this software without specific prior written permission.
+*
+*  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+*  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+*  ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+*  BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+*  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+*  OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+*  IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*--------------------------------------------------------------------------
+
+*--------------------------------------------------------------------------
+*                         FUNCTION LIST
+*--------------------------------------------------------------------------
+*
+* - yvup2rgb565_venum
+* - yyvup2rgb565_venum
+* - yvup2abgr8888_venum
+* - yyvup2abgr8888_venum
+*
+*--------------------------------------------------------------------------
+*/
+
+    .section yvu_plain_to_rgb_android, "x"  @ AREA
+    .text                              @ |.text|, CODE, READONLY
+    .align 2
+    .code  32                          @ CODE32
+
+/*-----------------------------------------------------------------------------
+ *   ARM Registers
+ * ---------------------------------------------------------------------------- */
+p_y       .req r0
+p_cr      .req r1
+p_cb      .req r2
+p_rgb     .req r3
+p_bgr     .req r3
+length    .req r12
+
+    .global yvup2rgb565_venum
+    .global yyvup2rgb565_venum
+    .global yvup2abgr8888_venum
+    .global yyvup2abgr8888_venum
+
+@ coefficients in color conversion matrix multiplication
+.equ COEFF_Y,          256             @ contribution of Y
+.equ COEFF_V_RED,      359             @ contribution of V for red
+.equ COEFF_U_GREEN,    -88             @ contribution of U for green
+.equ COEFF_V_GREEN,   -183             @ contribution of V for green
+.equ COEFF_U_BLUE,     454             @ contribution of U for blue
+
+@ Clamping constants 0x0 and 0xFF
+.equ COEFF_0,          0
+.equ COEFF_255,        255
+
+@ Bias coefficients for red, green and blue
+.equ COEFF_BIAS_R,   -45824            @ Red   bias =     -359*128 + 128
+.equ COEFF_BIAS_G,    34816            @ Green bias = (88+183)*128 + 128
+.equ COEFF_BIAS_B,   -57984            @ Blue  bias =     -454*128 + 128
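+
+/*--------------------------------------------------------------------------
+* Editorial note: each bias folds the -128 chroma offset and the rounding
+* term for the final >>8 into a single constant, e.g. for red:
+*
+*   R = (256*Y + 359*(V - 128) + 128) >> 8
+*     = (256*Y + 359*V + COEFF_BIAS_R) >> 8,  COEFF_BIAS_R = -359*128 + 128
+*
+* Worked example with Y = 128, V = 200:
+*   (32768 + 71800 - 45824) >> 8 = 58744 >> 8 = 229
+*--------------------------------------------------------------------------
+*/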
+
+
+/*--------------------------------------------------------------------------
+* FUNCTION     : yvup2rgb565_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION  : Perform YVU planar to RGB565 conversion.
+*--------------------------------------------------------------------------
+* C PROTOTYPE  : void yvup2rgb565_venum(uint8_t  *p_y,
+*                                       uint8_t  *p_cr,
+*                                       uint8_t  *p_cb,
+*                                       uint8_t  *p_rgb565,
+*                                       uint32_t  length)
+*--------------------------------------------------------------------------
+* REG INPUT    : R0: uint8_t  *p_y
+*                      pointer to the input Y Line
+*                R1: uint8_t  *p_cr
+*                      pointer to the input Cr Line
+*                R2: uint8_t  *p_cb
+*                      pointer to the input Cb Line
+*                R3: uint8_t  *p_rgb565
+*                      pointer to the output RGB Line
+*                R12: uint32_t  length
+*                      width of Line
+*--------------------------------------------------------------------------
+* STACK ARG    : None
+*--------------------------------------------------------------------------
+* REG OUTPUT   : None
+*--------------------------------------------------------------------------
+* MEM INPUT    : p_y      - a line of Y pixels
+*                p_cr     - a line of Cr pixels
+*                p_cb     - a line of Cb pixels
+*                length   - the width of the input line
+*--------------------------------------------------------------------------
+* MEM OUTPUT   : p_rgb565 - the converted rgb pixels
+*--------------------------------------------------------------------------
+* REG AFFECTED : ARM:  R0-R4, R12
+*                NEON: Q0-Q15
+*--------------------------------------------------------------------------
+* STACK USAGE  : none
+*--------------------------------------------------------------------------
+* CYCLES       : none
+*
+*--------------------------------------------------------------------------
+* NOTES        :
+*--------------------------------------------------------------------------
+*/
+.type yvup2rgb565_venum, %function
+yvup2rgb565_venum:
+    /*-------------------------------------------------------------------------
+     *  Save registers on the stack
+     * ------------------------------------------------------------------------ */
+    STMFD SP!, {LR}
+
+    VPUSH {D8-D15}
+
+    PLD [R0, R3]                       @ preload luma line
+
+    ADR   R12, constants
+
+    VLD1.S16  {D6, D7}, [R12]!         @ D6, D7: 359 |  -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]        @ Q15   :  -45824    |    34816   |  -57984 |     X
+
+    /*-------------------------------------------------------------------------
+     *  Load the 5th parameter via stack
+     *  R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     *  parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #68]                 @ LR is pushed into the stack so SP is
+                                       @ decreased by 4,
+                                       @ D8-D15 are also pushed into the stack
+                                       @ so SP is decreased by
+                                       @ 8-byte/D-Register * 8 D-Registers = 64,
+                                       @ so SP needs to be increased by 64+4=68
+                                       @ to get the value that was first pushed
+                                       @ into the stack (the 5th parameter
+                                       @ passed in through the stack)
+
+    /*-------------------------------------------------------------------------
+     *  Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4,  D7[1]               @ Q4:  0  |  0  |  0  |  0  |  0  |  0  |  0  |  0
+    VDUP.S16  Q5,  D7[2]               @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     *  Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0,   D30[0]             @ Q0:  -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1,   D30[1]             @ Q1:   34816 |  34816 |  34816 |  34816
+    VDUP.S32  Q2,   D31[0]             @ Q2:  -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     *  The main loop
+     * ------------------------------------------------------------------------ */
+loop_yvup2rgb565:
+
+    /*-------------------------------------------------------------------------
+     *  Load input from Y, V and U
+     *  D12  : Y0  Y1  Y2  Y3  Y4  Y5  Y6  Y7
+     *  D14  : V0  V1  V2  V3  V4  V5  V6  V7
+     *  D15  : U0  U1  U2  U3  U4  U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VLD1.U8  {D12},  [p_y]!            @ Load 8 Y  elements (uint8) to D12
+    VLD1.U8  {D14},  [p_cr]!           @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15},  [p_cb]!           @ Load 8 Cb elements (uint8) to D15
+
+    /*-------------------------------------------------------------------------
+     *  Expand uint8 value to uint16
+     *  D18, D19: Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7
+     *  D20, D21: V0 V1 V2 V3 V4 V5 V6 V7
+     *  D22, D23: U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8 Q9,  D12
+    VMOVL.U8 Q10, D14
+    VMOVL.U8 Q11, D15
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q12, D20, D6[0]         @ Q12:  359*(V0,V1,V2,V3)     Red
+    VMULL.S16  Q13, D22, D6[1]         @ Q13:  -88*(U0,U1,U2,U3)     Green
+    VMLAL.S16  Q13, D20, D6[2]         @ Q13:  -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q14, D22, D6[3]         @ Q14:  454*(U0,U1,U2,U3)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0                  @ Q12 add Red   bias -45824
+    VADD.S32  Q13, Q1                  @ Q13 add Green bias  34816
+    VADD.S32  Q14, Q2                  @ Q14 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16  Q12, D18, D7[0]         @ Q12: R0, R1, R2, R3 in 32-bit Q8 format
+    VMLAL.S16  Q13, D18, D7[0]         @ Q13: G0, G1, G2, G3 in 32-bit Q8 format
+    VMLAL.S16  Q14, D18, D7[0]         @ Q14: B0, B1, B2, B3 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D18,  Q12, #8          @ D18: R0, R1, R2, R3 in 16-bit Q0 format
+    VSHRN.S32   D20,  Q13, #8          @ D20: G0, G1, G2, G3 in 16-bit Q0 format
+    VSHRN.S32   D22,  Q14, #8          @ D22: B0, B1, B2, B3 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     *  Done with the first 4 elements, continue on the next 4 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q12, D21, D6[0]         @ Q12:  359*(V0,V1,V2,V3)     Red
+    VMULL.S16  Q13, D23, D6[1]         @ Q13:  -88*(U0,U1,U2,U3)     Green
+    VMLAL.S16  Q13, D21, D6[2]         @ Q13:  -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q14, D23, D6[3]         @ Q14:  454*(U0,U1,U2,U3)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0                  @ Q12 add Red   bias -45824
+    VADD.S32  Q13, Q1                  @ Q13 add Green bias  34816
+    VADD.S32  Q14, Q2                  @ Q14 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16  Q12, D19, D7[0]         @ Q12: R4, R5, R6, R7 in 32-bit Q8 format
+    VMLAL.S16  Q13, D19, D7[0]         @ Q13: G4, G5, G6, G7 in 32-bit Q8 format
+    VMLAL.S16  Q14, D19, D7[0]         @ Q14: B4, B5, B6, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D19,  Q12, #8          @ D19: R4, R5, R6, R7 in 16-bit Q0 format
+    VSHRN.S32   D21,  Q13, #8          @ D21: G4, G5, G6, G7 in 16-bit Q0 format
+    VSHRN.S32   D23,  Q14, #8          @ D23: B4, B5, B6, B7 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q9, Q9, Q4               @ if Q9 <   0, Q9 =   0
+    VMIN.S16  Q9, Q9, Q5               @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D28, Q9               @ store Red to D28, narrow the value from int16 to int8
+
+    VMAX.S16  Q10, Q10, Q4             @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5             @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D27, Q10             @ store Green to D27, narrow the value from int16 to int8
+
+    VMAX.S16  Q11, Q11, Q4             @ if Q11 <   0, Q11 =   0
+    VMIN.S16  Q11, Q11, Q5             @ if Q11 > 255, Q11 = 255
+    VQMOVUN.S16   D26, Q11             @ store Blue to D26, narrow the value from int16 to int8.
+
+    /*-------------------------------------------------------------------------
+     *  D27:  3 bits of Green + 5 bits of Blue
+     *  D28:  5 bits of Red   + 3 bits of Green
+     * ------------------------------------------------------------------------ */
+    VSRI.8   D28, D27, #5              @ right shift G by 5 and insert to R
+    VSHL.U8  D27, D27, #3              @ left  shift G by 3
+    VSRI.8   D27, D26, #3              @ right shift B by 3 and insert to G
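+
+    /*-------------------------------------------------------------------------
+     *  Editorial note: per pixel this packing is equivalent to the C:
+     *    px = ((R & 0xF8) << 8) | ((G & 0xFC) << 3) | (B >> 3);
+     *  D27 holds the low byte (G low bits + B), D28 the high byte (R + G high)
+     * ------------------------------------------------------------------------ */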
+
+    SUBS length, length, #8            @ check if the length is less than 8
+
+    BMI  trailing_yvup2rgb565          @ jump to trailing processing if remaining length is less than 8
+
+    VST2.U8  {D27, D28}, [p_rgb]!      @ vector store Red, Green, Blue to destination
+                                       @ Blue at LSB
+
+    BHI loop_yvup2rgb565               @ loop if more than 8 pixels left
+
+    BEQ  end_yvup2rgb565               @ done if exactly 8 pixels were processed in the loop
+
+
+trailing_yvup2rgb565:
+    /*-------------------------------------------------------------------------
+     *  There are 1 ~ 7 pixels left in the trailing part.
+     *  First add 7 to the length so it falls in the range 0 ~ 6,
+     *  e.g. with 1 pixel left in the trailing part, 1 - 8 + 7 = 0.
+     *  Then store 1 pixel unconditionally, since at least 1 pixel is
+     *  left in the trailing part.
+     * ------------------------------------------------------------------------ */
+    ADDS length, length, #7            @ 7 or fewer pixels are left in the trailing part
+
+    VST2.U8 {D27[0], D28[0]}, [p_rgb]! @ at least 1 pixel left in the trailing part
+    BEQ  end_yvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D27[1], D28[1]}, [p_rgb]! @ store one more pixel
+    BEQ  end_yvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D27[2], D28[2]}, [p_rgb]! @ store one more pixel
+    BEQ  end_yvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D27[3], D28[3]}, [p_rgb]! @ store one more pixel
+    BEQ  end_yvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D27[4], D28[4]}, [p_rgb]! @ store one more pixel
+    BEQ  end_yvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D27[5], D28[5]}, [p_rgb]! @ store one more pixel
+    BEQ  end_yvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D27[6], D28[6]}, [p_rgb]! @ store one more pixel
+
+end_yvup2rgb565:
+    VPOP  {D8-D15}
+    LDMFD SP!, {PC}
+
+                                       @ end of yvup2rgb565
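+
+/*--------------------------------------------------------------------------
+* Editorial note: the trailing block above unrolls what would be, in C:
+*
+*    while (length-- > 0)
+*        store_one_pixel();       -- one 16-bit store per leftover pixel
+*
+* NEON lane stores (VST2.U8 {D27[i], D28[i]}) take an immediate lane index,
+* so the loop over the up-to-7 leftover pixels must be fully unrolled.
+*--------------------------------------------------------------------------
+*/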
+
+
+/*--------------------------------------------------------------------------
+* FUNCTION     : yyvup2rgb565_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION  : Perform YYVU planar to RGB565 conversion.
+*--------------------------------------------------------------------------
+* C PROTOTYPE  : void yyvup2rgb565_venum(uint8_t  *p_y,
+*                                        uint8_t  *p_cr,
+*                                        uint8_t  *p_cb,
+*                                        uint8_t  *p_rgb565,
+*                                        uint32_t  length)
+*--------------------------------------------------------------------------
+* REG INPUT    : R0: uint8_t  *p_y
+*                      pointer to the input Y Line
+*                R1: uint8_t  *p_cr
+*                      pointer to the input Cr Line
+*                R2: uint8_t  *p_cb
+*                      pointer to the input Cb Line
+*                R3: uint8_t  *p_rgb565
+*                      pointer to the output RGB Line
+*                R12: uint32_t  length
+*                      width of Line
+*--------------------------------------------------------------------------
+* STACK ARG    : None
+*--------------------------------------------------------------------------
+* REG OUTPUT   : None
+*--------------------------------------------------------------------------
+* MEM INPUT    : p_y      - a line of Y pixels
+*                p_cr     - a line of Cr pixels
+*                p_cb     - a line of Cb pixels
+*                length   - the width of the input line
+*--------------------------------------------------------------------------
+* MEM OUTPUT   : p_rgb565 - the converted rgb pixels
+*--------------------------------------------------------------------------
+* REG AFFECTED : ARM:  R0-R4, R12
+*                NEON: Q0-Q15
+*--------------------------------------------------------------------------
+* STACK USAGE  : none
+*--------------------------------------------------------------------------
+* CYCLES       : none
+*
+*--------------------------------------------------------------------------
+* NOTES        :
+*--------------------------------------------------------------------------
+*/
+.type yyvup2rgb565_venum, %function
+yyvup2rgb565_venum:
+    /*-------------------------------------------------------------------------
+     *  Save registers on the stack
+     * ------------------------------------------------------------------------ */
+    STMFD SP!, {LR}
+
+    VPUSH {D8-D15}
+
+    PLD [R0, R3]                       @ preload luma line
+
+    ADR   R12, constants
+
+    VLD1.S16  {D6, D7}, [R12]!         @ D6, D7: 359 |  -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]        @ Q15   :  -45824    |    34816   |  -57984 |     X
+
+    /*-------------------------------------------------------------------------
+     *  Load the 5th parameter via stack
+     *  R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     *  parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #68]                 @ LR is pushed into the stack so SP is
+                                       @ decreased by 4,
+                                       @ D8-D15 are also pushed into the stack
+                                       @ so SP is decreased by
+                                       @ 8-byte/D-Register * 8 D-Registers = 64,
+                                       @ so SP needs to be increased by 64+4=68
+                                       @ to get the value that was first pushed
+                                       @ into the stack (the 5th parameter
+                                       @ passed in through the stack)
+
+    /*-------------------------------------------------------------------------
+     *  Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4,  D7[1]               @ Q4:  0  |  0  |  0  |  0  |  0  |  0  |  0  |  0
+    VDUP.S16  Q5,  D7[2]               @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     *  Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0,   D30[0]             @ Q0:  -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1,   D30[1]             @ Q1:   34816 |  34816 |  34816 |  34816
+    VDUP.S32  Q2,   D31[0]             @ Q2:  -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     *  The main loop
+     * ------------------------------------------------------------------------ */
+loop_yyvup2rgb565:
+
+    /*-------------------------------------------------------------------------
+     *  Load input from Y, V and U
+     *  D12, D13: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14, Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     *  D14     : V0 V1 V2 V3 V4 V5  V6  V7
+     *  D15     : U0 U1 U2 U3 U4 U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VLD2.U8  {D12,D13}, [p_y]!         @ Load 16 Luma elements (uint8) to D12, D13
+    VLD1.U8  {D14},     [p_cr]!        @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15},     [p_cb]!        @ Load 8 Cb elements (uint8) to D15
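+
+    /*-------------------------------------------------------------------------
+     *  Editorial note: VLD2 de-interleaves even/odd luma so that each V/U
+     *  pair can be applied to two horizontally adjacent Y samples (H2V1
+     *  subsampling). In C terms, per chroma sample i:
+     *    out[2*i]     = convert(y[2*i],     v[i], u[i]);
+     *    out[2*i + 1] = convert(y[2*i + 1], v[i], u[i]);
+     * ------------------------------------------------------------------------ */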
+
+    /*-------------------------------------------------------------------------
+     *  Expand uint8 value to uint16
+     *  D24, D25: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14
+     *  D26, D27: Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     *  D28, D29: V0 V1 V2 V3 V4 V5  V6  V7
+     *  D30, D31: U0 U1 U2 U3 U4 U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8 Q12, D12
+    VMOVL.U8 Q13, D13
+    VMOVL.U8 Q14, D14
+    VMOVL.U8 Q15, D15
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q6, D28, D6[0]          @ Q6:  359*(V0,V1,V2,V3)     Red
+    VMULL.S16  Q7, D30, D6[1]          @ Q7:  -88*(U0,U1,U2,U3)     Green
+    VMLAL.S16  Q7, D28, D6[2]          @ Q7:  -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q8, D30, D6[3]          @ Q8:  454*(U0,U1,U2,U3)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0                   @ Q6 add Red   bias -45824
+    VADD.S32  Q7, Q1                   @ Q7 add Green bias  34816
+    VADD.S32  Q8, Q2                   @ Q8 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32   Q9, Q6
+    VMLAL.S16  Q6, D24, D7[0]          @ Q6: R0, R2, R4, R6 in 32-bit Q8 format
+    VMLAL.S16  Q9, D26, D7[0]          @ Q9: R1, R3, R5, R7 in 32-bit Q8 format
+
+    VMOV.S32   Q10, Q7
+    VMLAL.S16  Q7,  D24, D7[0]         @ Q7:  G0, G2, G4, G6 in 32-bit Q8 format
+    VMLAL.S16  Q10, D26, D7[0]         @ Q10: G1, G3, G5, G7 in 32-bit Q8 format
+
+    VMOV.S32   Q11, Q8
+    VMLAL.S16  Q8,  D24, D7[0]         @ Q8:  B0, B2, B4, B6 in 32-bit Q8 format
+    VMLAL.S16  Q11, D26, D7[0]         @ Q11: B1, B3, B5, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D12, Q6,  #8           @ D12: R0 R2 R4 R6 in 16-bit Q0 format
+    VSHRN.S32   D13, Q9,  #8           @ D13: R1 R3 R5 R7 in 16-bit Q0 format
+    VZIP.16     D12, D13               @ Q6 : R0 R1 R2 R3 R4 R5 R6 R7
+
+    VSHRN.S32   D18, Q7,  #8           @ D18: G0 G2 G4 G6 in 16-bit Q0 format
+    VSHRN.S32   D19, Q10, #8           @ D19: G1 G3 G5 G7 in 16-bit Q0 format
+    VZIP.16     D18, D19               @ Q9 : G0 G1 G2 G3 G4 G5 G6 G7
+
+    VSHRN.S32   D20, Q8,  #8           @ D20: B0 B2 B4 B6 in 16-bit Q0 format
+    VSHRN.S32   D21, Q11, #8           @ D21: B1 B3 B5 B7 in 16-bit Q0 format
+    VZIP.16     D20, D21               @ Q10: B0 B1 B2 B3 B4 B5 B6 B7
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q6, Q6, Q4               @ if Q6 <   0, Q6 =   0
+    VMIN.S16  Q6, Q6, Q5               @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16  D23, Q6               @ store Red to D23, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4               @ if Q9 <   0, Q9 =   0
+    VMIN.S16  Q9, Q9, Q5               @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D22, Q9               @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16  Q10, Q10, Q4             @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5             @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D21, Q10             @ store Blue to D21, narrow the value from int16 to int8
+
+    /*-------------------------------------------------------------------------
+     *  D22:  3 bits of Green + 5 bits of Blue
+     *  D23:  5 bits of Red   + 3 bits of Green
+     * ------------------------------------------------------------------------ */
+    VSRI.8   D23, D22, #5              @ right shift G by 5 and insert to R
+    VSHL.U8  D22, D22, #3              @ left shift G by 3
+    VSRI.8   D22, D21, #3              @ right shift B by 3 and insert to G
+
+    SUBS length, length, #8            @ check if the length is less than 8
+
+    BMI  trailing_yyvup2rgb565         @ jump to trailing processing if remaining length is less than 8
+
+    VST2.U8  {D22,D23}, [p_rgb]!       @ vector store Red, Green, Blue to destination
+                                       @ Blue at LSB
+
+    BEQ  end_yyvup2rgb565              @ done if exactly 8 pixels were processed in the loop
+
+
+    /*-------------------------------------------------------------------------
+     *  Done with the first 8 elements, continue on the next 8 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q6, D29, D6[0]          @ Q6: 359*(V4,V5,V6,V7)       Red
+    VMULL.S16  Q7, D31, D6[1]          @ Q7: -88*(U4,U5,U6,U7)      Green
+    VMLAL.S16  Q7, D29, D6[2]          @ Q7: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16  Q8, D31, D6[3]          @ Q8: 454*(U4,U5,U6,U7)       Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0                   @ Q6 add Red   bias -45824
+    VADD.S32  Q7, Q1                   @ Q7 add Green bias  34816
+    VADD.S32  Q8, Q2                   @ Q8 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32   Q9, Q6
+    VMLAL.S16  Q6, D25, D7[0]          @ Q6: R8 R10 R12 R14 in 32-bit Q8 format
+    VMLAL.S16  Q9, D27, D7[0]          @ Q9: R9 R11 R13 R15 in 32-bit Q8 format
+
+    VMOV.S32   Q10, Q7
+    VMLAL.S16  Q7,  D25, D7[0]         @ Q7:  G8, G10, G12, G14 in 32-bit Q8 format
+    VMLAL.S16  Q10, D27, D7[0]         @ Q10: G9, G11, G13, G15 in 32-bit Q8 format
+
+    VMOV.S32   Q11, Q8
+    VMLAL.S16  Q8,  D25, D7[0]         @ Q8:  B8, B10, B12, B14 in 32-bit Q8 format
+    VMLAL.S16  Q11, D27, D7[0]         @ Q11: B9, B11, B13, B15 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D12, Q6,  #8           @ D12: R8 R10 R12 R14 in 16-bit Q0 format
+    VSHRN.S32   D13, Q9,  #8           @ D13: R9 R11 R13 R15 in 16-bit Q0 format
+    VZIP.16     D12, D13               @ Q6: R8 R9 R10 R11 R12 R13 R14 R15
+
+    VSHRN.S32   D18, Q7,  #8           @ D18: G8 G10 G12 G14 in 16-bit Q0 format
+    VSHRN.S32   D19, Q10, #8           @ D19: G9 G11 G13 G15 in 16-bit Q0 format
+    VZIP.16     D18, D19               @ Q9:  G8 G9 G10 G11 G12 G13 G14 G15
+
+    VSHRN.S32   D20, Q8,  #8           @ D20: B8 B10 B12 B14 in 16-bit Q0 format
+    VSHRN.S32   D21, Q11, #8           @ D21: B9 B11 B13 B15 in 16-bit Q0 format
+    VZIP.16     D20, D21               @ Q10: B8 B9 B10 B11 B12 B13 B14 B15
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q6, Q6, Q4               @ if Q6 <   0, Q6 =   0
+    VMIN.S16  Q6, Q6, Q5               @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16  D23, Q6               @ store Red to D23, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4               @ if Q9 <   0, Q9 =   0
+    VMIN.S16  Q9, Q9, Q5               @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D22, Q9               @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16  Q10, Q10, Q4             @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5             @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D21, Q10             @ store Blue to D21, narrow the value from int16 to int8
+
+    /*-------------------------------------------------------------------------
+     *  D22:  3 bits of Green + 5 bits of Blue
+     *  D23:  5 bits of Red   + 3 bits of Green
+     * ------------------------------------------------------------------------ */
+    VSRI.8   D23, D22, #5              @ right shift G by 5 and insert to R
+    VSHL.U8  D22, D22, #3              @ left shift G by 3
+    VSRI.8   D22, D21, #3              @ right shift B by 3 and insert to G
+
+    SUBS length, length, #8            @ check if the length is less than 8
+
+    BMI  trailing_yyvup2rgb565         @ jump to trailing processing if remaining length is less than 8
+
+    VST2.U8  {D22,D23}, [p_rgb]!       @ vector store Red, Green, Blue to destination
+                                       @ Blue at LSB
+
+    BHI loop_yyvup2rgb565              @ loop if more than 8 pixels left
+
+    BEQ  end_yyvup2rgb565              @ done if exactly 8 pixels were processed in the loop
+
+
+trailing_yyvup2rgb565:
+    /*-------------------------------------------------------------------------
+     *  There are 1 ~ 7 pixels left in the trailing part.
+     *  First add 7 to the length so it falls in the range 0 ~ 6,
+     *  e.g. with 1 pixel left in the trailing part, 1 - 8 + 7 = 0.
+     *  Then store 1 pixel unconditionally, since at least 1 pixel is
+     *  left in the trailing part.
+     * ------------------------------------------------------------------------ */
+    ADDS length, length, #7            @ 7 or fewer pixels are left in the trailing part
+
+    VST2.U8 {D22[0],D23[0]}, [p_rgb]!  @ at least 1 pixel left in the trailing part
+    BEQ end_yyvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D22[1],D23[1]}, [p_rgb]!  @ store one more pixel
+    BEQ end_yyvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D22[2],D23[2]}, [p_rgb]!  @ store one more pixel
+    BEQ end_yyvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D22[3],D23[3]}, [p_rgb]!  @ store one more pixel
+    BEQ end_yyvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D22[4],D23[4]}, [p_rgb]!  @ store one more pixel
+    BEQ end_yyvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D22[5],D23[5]}, [p_rgb]!  @ store one more pixel
+    BEQ end_yyvup2rgb565               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST2.U8 {D22[6],D23[6]}, [p_rgb]!  @ store one more pixel
+
+end_yyvup2rgb565:
+    VPOP  {D8-D15}
+    LDMFD SP!, {PC}
+
+                                       @ end of yyvup2rgb565
+
+constants:
+    .hword (COEFF_V_RED),  (COEFF_U_GREEN), (COEFF_V_GREEN), (COEFF_U_BLUE) @   359  | -88   |  -183  | 454
+    .hword (COEFF_Y),      (COEFF_0),       (COEFF_255)    , (COEFF_0)      @   256  |   0   |   255  |  0
+    .word  (COEFF_BIAS_R), (COEFF_BIAS_G),  (COEFF_BIAS_B)                  @ -45824 | 34816 | -57984 |  X
+
+/*--------------------------------------------------------------------------
+* FUNCTION     : yvup2abgr8888_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION  : Perform YVU planar to ABGR8888 conversion.
+*--------------------------------------------------------------------------
+* C PROTOTYPE  : void yvup2abgr8888_venum(uint8_t  *p_y,
+*                                         uint8_t  *p_cr,
+*                                         uint8_t  *p_cb,
+*                                         uint8_t  *p_abgr8888,
+*                                         uint32_t  length)
+*--------------------------------------------------------------------------
+* REG INPUT    : R0: uint8_t  *p_y
+*                      pointer to the input Y Line
+*                R1: uint8_t  *p_cr
+*                      pointer to the input Cr Line
+*                R2: uint8_t  *p_cb
+*                      pointer to the input Cb Line
+*                R3: uint8_t  *p_abgr8888
+*                      pointer to the output ABGR Line
+*                R12: uint32_t  length
+*                      width of Line
+*--------------------------------------------------------------------------
+* STACK ARG    : None
+*--------------------------------------------------------------------------
+* REG OUTPUT   : None
+*--------------------------------------------------------------------------
+* MEM INPUT    : p_y      - a line of Y pixels
+*                p_cr     - a line of Cr pixels
+*                p_cb     - a line of Cb pixels
+*                length   - the width of the input line
+*--------------------------------------------------------------------------
+* MEM OUTPUT   : p_abgr8888 - the converted ABGR pixels
+*--------------------------------------------------------------------------
+* REG AFFECTED : ARM:  R0-R4, R12
+*                NEON: Q0-Q15
+*--------------------------------------------------------------------------
+* STACK USAGE  : none
+*--------------------------------------------------------------------------
+* CYCLES       : none
+*
+*--------------------------------------------------------------------------
+* NOTES        :
+*--------------------------------------------------------------------------
+*/
+.type yvup2abgr8888_venum, %function
+yvup2abgr8888_venum:
+    /*-------------------------------------------------------------------------
+     *  Save registers on the stack
+     * ------------------------------------------------------------------------ */
+    STMFD SP!, {LR}
+
+    VPUSH {D8-D15}
+
+    PLD [R0, R3]                       @ preload luma line
+
+    ADR   R12, constants
+
+    VLD1.S16  {D6, D7}, [R12]!         @ D6, D7: 359 |  -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]        @ Q15   :  -45824    |    34816   |  -57984 |     X
+
+    /*-------------------------------------------------------------------------
+     *  Load the 5th parameter via stack
+     *  R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     *  parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #68]                 @ LR is pushed into the stack so SP is
+                                       @ decreased by 4,
+                                       @ D8-D15 are also pushed into the stack
+                                       @ so SP is decreased by
+                                       @ 8-byte/D-Register * 8 D-Registers = 64,
+                                       @ so SP needs to be increased by 64+4=68
+                                       @ to get the value that was first pushed
+                                       @ into the stack (the 5th parameter
+                                       @ passed in through the stack)
+
+    /*-------------------------------------------------------------------------
+     *  Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4,  D7[1]               @ Q4:  0  |  0  |  0  |  0  |  0  |  0  |  0  |  0
+    VDUP.S16  Q5,  D7[2]               @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     *  Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0,   D30[0]             @ Q0:  -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1,   D30[1]             @ Q1:   34816 |  34816 |  34816 |  34816
+    VDUP.S32  Q2,   D31[0]             @ Q2:  -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     *  The main loop
+     * ------------------------------------------------------------------------ */
+loop_yvup2abgr:
+
+    /*-------------------------------------------------------------------------
+     *  Load input from Y, V and U
+     *  D12  : Y0  Y1  Y2  Y3  Y4  Y5  Y6  Y7
+     *  D14  : V0  V1  V2  V3  V4  V5  V6  V7
+     *  D15  : U0  U1  U2  U3  U4  U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VLD1.U8  {D12},  [p_y]!            @ Load 8 Luma elements (uint8) to D12
+    VLD1.U8  {D14},  [p_cr]!           @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15},  [p_cb]!           @ Load 8 Cb elements (uint8) to D15
+
+    /*-------------------------------------------------------------------------
+     *  Expand uint8 value to uint16
+     *  D18, D19: Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7
+     *  D20, D21: V0 V1 V2 V3 V4 V5 V6 V7
+     *  D22, D23: U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8 Q9,  D12
+    VMOVL.U8 Q10, D14
+    VMOVL.U8 Q11, D15
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q12, D20, D6[0]         @ Q12:  359*(V0,V1,V2,V3)     Red
+    VMULL.S16  Q13, D22, D6[1]         @ Q13:  -88*(U0,U1,U2,U3)     Green
+    VMLAL.S16  Q13, D20, D6[2]         @ Q13:  -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q14, D22, D6[3]         @ Q14:  454*(U0,U1,U2,U3)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0                  @ Q12 add Red   bias -45824
+    VADD.S32  Q13, Q1                  @ Q13 add Green bias  34816
+    VADD.S32  Q14, Q2                  @ Q14 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16  Q12, D18, D7[0]         @ Q12: R0, R1, R2, R3 in 32-bit Q8 format
+    VMLAL.S16  Q13, D18, D7[0]         @ Q13: G0, G1, G2, G3 in 32-bit Q8 format
+    VMLAL.S16  Q14, D18, D7[0]         @ Q14: B0, B1, B2, B3 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D18,  Q12, #8          @ D18: R0, R1, R2, R3 in 16-bit Q0 format
+    VSHRN.S32   D20,  Q13, #8          @ D20: G0, G1, G2, G3 in 16-bit Q0 format
+    VSHRN.S32   D22,  Q14, #8          @ D22: B0, B1, B2, B3 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     *  Done with the first 4 elements, continue on the next 4 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q12, D21, D6[0]         @ Q12:  359*(V0,V1,V2,V3)     Red
+    VMULL.S16  Q13, D23, D6[1]         @ Q13: -88*(U0,U1,U2,U3)     Green
+    VMLAL.S16  Q13, D21, D6[2]         @ Q13: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q14, D23, D6[3]         @ Q14:  454*(U0,U1,U2,U3)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0                  @ Q12 add Red   bias -45824
+    VADD.S32  Q13, Q1                  @ Q13 add Green bias  34816
+    VADD.S32  Q14, Q2                  @ Q14 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16  Q12, D19, D7[0]         @ Q12: R4, R5, R6, R7 in 32-bit Q8 format
+    VMLAL.S16  Q13, D19, D7[0]         @ Q13: G4, G5, G6, G7 in 32-bit Q8 format
+    VMLAL.S16  Q14, D19, D7[0]         @ Q14: B4, B5, B6, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D19,  Q12, #8          @ D19: R4, R5, R6, R7 in 16-bit Q0 format
+    VSHRN.S32   D21,  Q13, #8          @ D21: G4, G5, G6, G7 in 16-bit Q0 format
+    VSHRN.S32   D23,  Q14, #8          @ D23: B4, B5, B6, B7 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q11, Q11, Q4             @ if Q11 <   0, Q11 =   0
+    VMIN.S16  Q11, Q11, Q5             @ if Q11 > 255, Q11 = 255
+    VQMOVUN.S16   D28, Q11             @ store Blue to D28, narrow the value from int16 to int8
+
+    VMAX.S16  Q10, Q10, Q4             @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5             @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D27, Q10             @ store Green to D27, narrow the value from int16 to int8
+
+    VMAX.S16    Q9, Q9, Q4             @ if Q9 <   0, Q9 =   0
+    VMIN.S16    Q9, Q9, Q5             @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16    D26, Q9             @ store Red to D26, narrow the value from int16 to int8
+
+    /*-------------------------------------------------------------------------
+     *  abgr format with leading 0xFF byte
+     * ------------------------------------------------------------------------ */
+    VMOVN.I16  D29, Q5                 @ D29:  255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
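+
+    /*-------------------------------------------------------------------------
+     *  Editorial note: VST4 interleaves the bytes as R,G,B,0xFF in memory,
+     *  i.e. read back as a little-endian uint32_t:
+     *    px = 0xFF000000u | (B << 16) | (G << 8) | R;   -- ABGR8888
+     * ------------------------------------------------------------------------ */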
+
+    SUBS length, length, #8            @ check if the length is less than 8
+
+    BMI  trailing_yvup2abgr            @ jump to trailing processing if remaining length is less than 8
+
+    VST4.U8  {D26,D27,D28,D29}, [p_bgr]!   @ vector store Red, Green, Blue, Alpha to destination
+                                       @ Red at LSB
+
+    BHI loop_yvup2abgr                 @ loop if more than 8 pixels left
+
+    BEQ  end_yvup2abgr                 @ done if exactly 8 pixels were processed in the loop
+
+
+trailing_yvup2abgr:
+    /*-------------------------------------------------------------------------
+     *  There are 1 ~ 7 pixels left in the trailing part.
+     *  First add 7 to the length so it falls in the range 0 ~ 6,
+     *  e.g. with 1 pixel left in the trailing part, 1 - 8 + 7 = 0.
+     *  Then store 1 pixel unconditionally, since at least 1 pixel is
+     *  left in the trailing part.
+     * ------------------------------------------------------------------------ */
+    ADDS length, length, #7            @ 7 or fewer pixels are left in the trailing part
+
+    VST4.U8 {D26[0], D27[0], D28[0], D29[0]}, [p_bgr]! @ at least 1 pixel left in the trailing part
+    BEQ  end_yvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D26[1], D27[1], D28[1], D29[1]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D26[2], D27[2], D28[2], D29[2]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D26[3], D27[3], D28[3], D29[3]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D26[4], D27[4], D28[4], D29[4]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D26[5], D27[5], D28[5], D29[5]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D26[6], D27[6], D28[6], D29[6]}, [p_bgr]! @ store one more pixel
+
+end_yvup2abgr:
+    VPOP  {D8-D15}
+    LDMFD SP!, {PC}
+                                       @ end of yvup2abgr
+
+/*--------------------------------------------------------------------------
+* FUNCTION     : yyvup2abgr8888_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION  : Perform YYVU planar to ABGR8888 conversion.
+*--------------------------------------------------------------------------
+* C PROTOTYPE  : void yyvup2abgr8888_venum(uint8_t  *p_y,
+*                                          uint8_t  *p_cr,
+*                                          uint8_t  *p_cb,
+*                                          uint8_t  *p_abgr8888,
+*                                          uint32_t  length)
+*--------------------------------------------------------------------------
+* REG INPUT    : R0: uint8_t  *p_y
+*                      pointer to the input Y Line
+*                R1: uint8_t  *p_cr
+*                      pointer to the input Cr Line
+*                R2: uint8_t  *p_cb
+*                      pointer to the input Cb Line
+*                R3: uint8_t  *p_abgr8888
+*                      pointer to the output ABGR Line
+*                R12: uint32_t  length
+*                      width of Line
+*--------------------------------------------------------------------------
+* STACK ARG    : None
+*--------------------------------------------------------------------------
+* REG OUTPUT   : None
+*--------------------------------------------------------------------------
+* MEM INPUT    : p_y      - a line of Y pixels
+*                p_cr     - a line of Cr pixels
+*                p_cb     - a line of Cb pixels
+*                length   - the width of the input line
+*--------------------------------------------------------------------------
+* MEM OUTPUT   : p_abgr8888 - the converted ABGR pixels
+*--------------------------------------------------------------------------
+* REG AFFECTED : ARM:  R0-R4, R12
+*                NEON: Q0-Q15
+*--------------------------------------------------------------------------
+* STACK USAGE  : none
+*--------------------------------------------------------------------------
+* CYCLES       : none
+*
+*--------------------------------------------------------------------------
+* NOTES        :
+*--------------------------------------------------------------------------
+*/
+.type yyvup2abgr8888_venum, %function
+yyvup2abgr8888_venum:
+    /*-------------------------------------------------------------------------
+     *  Save registers on the stack
+     * ------------------------------------------------------------------------ */
+    STMFD SP!, {LR}
+
+    VPUSH {D8-D15}
+
+    PLD [R0, R3]                       @ preload luma line
+
+    ADR   R12, constants
+
+    VLD1.S16  {D6, D7}, [R12]!         @ D6, D7: 359 |  -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]        @ Q15   :  -45824    |    34816   |  -57984 |     X
+
+    /*-------------------------------------------------------------------------
+     *  Load the 5th parameter via stack
+     *  R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     *  parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #68]                 @ LR is pushed into the stack so SP is
+                                       @ decreased by 4,
+                                       @ D8-D15 are also pushed into the stack
+                                       @ so SP is decreased by
+                                       @ 8-byte/D-Register * 8 D-Registers = 64,
+                                       @ so SP needs to be increased by 64+4=68
+                                       @ to get the value that was first pushed
+                                       @ into the stack (the 5th parameter
+                                       @ passed in through the stack)
+
+    /*-------------------------------------------------------------------------
+     *  Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4,  D7[1]               @ Q4:  0  |  0  |  0  |  0  |  0  |  0  |  0  |  0
+    VDUP.S16  Q5,  D7[2]               @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     *  Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0,   D30[0]             @ Q0:  -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1,   D30[1]             @ Q1:   34816 |  34816 |  34816 |  34816
+    VDUP.S32  Q2,   D31[0]             @ Q2:  -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     *  The main loop
+     * ------------------------------------------------------------------------ */
+loop_yyvup2abgr:
+
+    /*-------------------------------------------------------------------------
+     *  Load input from Y, V and U
+     *  D12, D13: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14, Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     *  D14  : V0  V1  V2  V3  V4  V5  V6  V7
+     *  D15  : U0  U1  U2  U3  U4  U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VLD2.U8  {D12,D13}, [p_y]!         @ Load 16 Luma elements (uint8) to D12, D13
+    VLD1.U8  {D14},  [p_cr]!           @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15},  [p_cb]!           @ Load 8 Cb elements (uint8) to D15
+
+    /*-------------------------------------------------------------------------
+     *  Expand uint8 value to uint16
+     *  D24, D25: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14
+     *  D26, D27: Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     *  D28, D29: V0 V1 V2 V3 V4 V5  V6  V7
+     *  D30, D31: U0 U1 U2 U3 U4 U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8 Q12, D12
+    VMOVL.U8 Q13, D13
+    VMOVL.U8 Q14, D14
+    VMOVL.U8 Q15, D15
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q6, D28, D6[0]          @ Q6:  359*(V0,V1,V2,V3)     Red
+    VMULL.S16  Q7, D30, D6[1]          @ Q7: -88*(U0,U1,U2,U3)     Green
+    VMLAL.S16  Q7, D28, D6[2]          @ Q7: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q8, D30, D6[3]          @ Q8:  454*(U0,U1,U2,U3)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0                   @ Q6 add Red   bias -45824
+    VADD.S32  Q7, Q1                   @ Q7 add Green bias  34816
+    VADD.S32  Q8, Q2                   @ Q8 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32   Q9, Q6
+    VMLAL.S16  Q6, D24, D7[0]          @ Q6: R0, R2, R4, R6 in 32-bit Q8 format
+    VMLAL.S16  Q9, D26, D7[0]          @ Q9: R1, R3, R5, R7 in 32-bit Q8 format
+
+    VMOV.S32   Q10, Q7
+    VMLAL.S16  Q7,  D24, D7[0]         @ Q7:  G0, G2, G4, G6 in 32-bit Q8 format
+    VMLAL.S16  Q10, D26, D7[0]         @ Q10: G1, G3, G5, G7 in 32-bit Q8 format
+
+    VMOV.S32   Q11, Q8
+    VMLAL.S16  Q8,  D24, D7[0]         @ Q8:  B0, B2, B4, B6 in 32-bit Q8 format
+    VMLAL.S16  Q11, D26, D7[0]         @ Q11: B1, B3, B5, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D12, Q6,  #8           @ D12: R0 R2 R4 R6 in 16-bit Q0 format
+    VSHRN.S32   D13, Q9,  #8           @ D13: R1 R3 R5 R7 in 16-bit Q0 format
+    VZIP.16     D12, D13               @ Q6 : R0 R1 R2 R3 R4 R5 R6 R7
+
+    VSHRN.S32   D18, Q7,  #8           @ D18: G0 G2 G4 G6 in 16-bit Q0 format
+    VSHRN.S32   D19, Q10, #8           @ D19: G1 G3 G5 G7 in 16-bit Q0 format
+    VZIP.16     D18, D19               @ Q9 : G0 G1 G2 G3 G4 G5 G6 G7
+
+    VSHRN.S32   D20, Q8,  #8           @ D20: B0 B2 B4 B6 in 16-bit Q0 format
+    VSHRN.S32   D21, Q11, #8           @ D21: B1 B3 B5 B7 in 16-bit Q0 format
+    VZIP.16     D20, D21               @ Q10: B0 B1 B2 B3 B4 B5 B6 B7
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q10, Q10, Q4             @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5             @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D23, Q10             @ store Blue to D23, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4               @ if Q9 <   0, Q9 =   0
+    VMIN.S16  Q9, Q9, Q5               @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D22, Q9               @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16  Q6, Q6, Q4               @ if Q6 <   0, Q6 =   0
+    VMIN.S16  Q6, Q6, Q5               @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16  D21, Q6               @ store Red to D21, narrow the value from int16 to int8
+
+    /*-------------------------------------------------------------------------
+     *  abgr format with leading 0xFF byte
+     * ------------------------------------------------------------------------ */
+    VMOVN.I16  D24, Q5                 @ D24:  255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    SUBS length, length, #8            @ check if the length is less than 8
+
+    BMI  trailing_yyvup2abgr           @ jump to trailing processing if remaining length is less than 8
+
+    VST4.U8  {D21,D22,D23,D24}, [p_bgr]!   @ vector store Blue, Green, Red to destination
+                                       @ Red at LSB
+
+    BEQ  end_yyvup2abgr                @ done if exactly 8 pixels were processed in the loop
+
+
+    /*-------------------------------------------------------------------------
+     *  Done with the first 8 elements, continue on the next 8 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q6, D29, D6[0]          @ Q6: 359*(V4,V5,V6,V7)       Red
+    VMULL.S16  Q7, D31, D6[1]          @ Q7: -88*(U4,U5,U6,U7)      Green
+    VMLAL.S16  Q7, D29, D6[2]          @ Q7: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16  Q8, D31, D6[3]          @ Q8: 454*(U4,U5,U6,U7)       Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0                   @ Q6 add Red   bias -45824
+    VADD.S32  Q7, Q1                   @ Q7 add Green bias  34816
+    VADD.S32  Q8, Q2                   @ Q8 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32   Q9, Q6
+    VMLAL.S16  Q6, D25, D7[0]          @ Q6: R8 R10 R12 R14 in 32-bit Q8 format
+    VMLAL.S16  Q9, D27, D7[0]          @ Q9: R9 R11 R13 R15 in 32-bit Q8 format
+
+    VMOV.S32   Q10, Q7
+    VMLAL.S16  Q7,  D25, D7[0]         @ Q7:  G8, G10, G12, G14 in 32-bit Q8 format
+    VMLAL.S16  Q10, D27, D7[0]         @ Q10: G9, G11, G13, G15 in 32-bit Q8 format
+
+    VMOV.S32   Q11, Q8
+    VMLAL.S16  Q8,  D25, D7[0]         @ Q8:  B8, B10, B12, B14 in 32-bit Q8 format
+    VMLAL.S16  Q11, D27, D7[0]         @ Q11: B9, B11, B13, B15 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D12, Q6,  #8           @ D12: R8 R10 R12 R14 in 16-bit Q0 format
+    VSHRN.S32   D13, Q9,  #8           @ D13: R9 R11 R13 R15 in 16-bit Q0 format
+    VZIP.16     D12, D13               @ Q6: R8 R9 R10 R11 R12 R13 R14 R15
+
+    VSHRN.S32   D18, Q7,  #8           @ D18: G8 G10 G12 G14 in 16-bit Q0 format
+    VSHRN.S32   D19, Q10, #8           @ D19: G9 G11 G13 G15 in 16-bit Q0 format
+    VZIP.16     D18, D19               @ Q9:  G8 G9 G10 G11 G12 G13 G14 G15
+
+    VSHRN.S32   D20, Q8,  #8           @ D20: B8 B10 B12 B14 in 16-bit Q0 format
+    VSHRN.S32   D21, Q11, #8           @ D21: B9 B11 B13 B15 in 16-bit Q0 format
+    VZIP.16     D20, D21               @ Q10: B8 B9 B10 B11 B12 B13 B14 B15
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q10, Q10, Q4             @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5             @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D23, Q10             @ store Blue to D23, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4               @ if Q9 <   0, Q9 =   0
+    VMIN.S16  Q9, Q9, Q5               @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D22, Q9               @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16  Q6, Q6, Q4               @ if Q6 <   0, Q6 =   0
+    VMIN.S16  Q6, Q6, Q5               @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16  D21, Q6               @ store Red to D21, narrow the value from int16 to int8
+
+    /*-------------------------------------------------------------------------
+     *  abgr format with leading 0xFF byte
+     * ------------------------------------------------------------------------ */
+    VMOVN.I16  D24, Q5                 @ D24:  255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    SUBS length, length, #8            @ check if the length is less than 8
+
+    BMI  trailing_yyvup2abgr           @ jump to trailing processing if remaining length is less than 8
+
+    VST4.U8  {D21,D22,D23,D24}, [p_bgr]!   @ vector store Blue, Green, Red to destination
+                                       @ Red at LSB
+
+    BHI loop_yyvup2abgr                @ loop if more than 8 pixels left
+
+    BEQ  end_yyvup2abgr                @ done if exactly 8 pixels were processed in the loop
+
+
+trailing_yyvup2abgr:
+    /*-------------------------------------------------------------------------
+     *  There are 1 ~ 7 pixels left in the trailing part.
+     *  First add 7 to the length so the length is in the range 0 ~ 6,
+     *  e.g. 1 pixel left in the trailing part gives 1 - 8 + 7 = 0.
+     *  Then save 1 pixel unconditionally, since at least 1 pixel is left
+     *  in the trailing part.
+     * ------------------------------------------------------------------------ */
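+    /*-------------------------------------------------------------------------
+     *  Equivalent C sketch (illustrative only, not in the original source;
+     *  store_pixel() is a hypothetical stand-in for the per-lane VST4
+     *  stores below):
+     *      length += 7;                   // length was in [-7, -1]
+     *      store_pixel(0);                // always at least one pixel left
+     *      while (length-- > 0)
+     *          store_pixel(next_lane);    // lanes 1..6 as needed
+     * ------------------------------------------------------------------------ */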
+    ADDS length, length, #7            @ there are 7 or fewer pixels in the trailing part
+
+    VST4.U8 {D21[0],D22[0],D23[0],D24[0]}, [p_bgr]! @ at least 1 pixel left in the trailing part
+    BEQ end_yyvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D21[1],D22[1],D23[1],D24[1]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D21[2],D22[2],D23[2],D24[2]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D21[3],D22[3],D23[3],D24[3]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D21[4],D22[4],D23[4],D24[4]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D21[5],D22[5],D23[5],D24[5]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2abgr                 @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST4.U8 {D21[6],D22[6],D23[6],D24[6]}, [p_bgr]!  @ store one more pixel
+
+end_yyvup2abgr:
+    VPOP  {D8-D15}
+    LDMFD SP!, {PC}
+                                       @ end of yyvup2abgr
+
+.end
+
diff --git a/libjpegtwrp/asm/armv7/jdcolor-armv7.S b/libjpegtwrp/asm/armv7/jdcolor-armv7.S
new file mode 100644
index 0000000..b2da6d5
--- /dev/null
+++ b/libjpegtwrp/asm/armv7/jdcolor-armv7.S
@@ -0,0 +1,632 @@
+/*------------------------------------------------------------------------
+* jdcolor-armv7.S
+*
+*  Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+*
+*  Redistribution and use in source and binary forms, with or without
+*  modification, are permitted provided that the following conditions are
+*  met:
+*      * Redistributions of source code must retain the above copyright
+*        notice, this list of conditions and the following disclaimer.
+*      * Redistributions in binary form must reproduce the above
+*        copyright notice, this list of conditions and the following
+*        disclaimer in the documentation and/or other materials provided
+*        with the distribution.
+*      * Neither the name of Code Aurora Forum, Inc. nor the names of its
+*        contributors may be used to endorse or promote products derived
+*        from this software without specific prior written permission.
+*
+*  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+*  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+*  ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+*  BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+*  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+*  OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+*  IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*--------------------------------------------------------------------------
+
+*--------------------------------------------------------------------------
+*                         FUNCTION LIST
+*--------------------------------------------------------------------------
+*
+* - yvup2bgr888_venum
+* - yyvup2bgr888_venum
+*
+*--------------------------------------------------------------------------
+*/
+
+    .section yvu_plain_to_bgr, "x"     @ AREA
+    .text                              @ |.text|, CODE, READONLY
+    .align 2
+    .code  32                          @ CODE32
+
+/*-----------------------------------------------------------------------------
+ *   ARM Registers
+ * ---------------------------------------------------------------------------- */
+p_y       .req r0
+p_cr      .req r1
+p_cb      .req r2
+p_rgb     .req r3
+p_bgr     .req r3
+length    .req r12
+
+    .global yvup2bgr888_venum
+    .global yyvup2bgr888_venum
+
+@ coefficients in color conversion matrix multiplication
+.equ COEFF_Y,          256             @ contribution of Y
+.equ COEFF_V_RED,      359             @ contribution of V for red
+.equ COEFF_U_GREEN,    -88             @ contribution of U for green
+.equ COEFF_V_GREEN,   -183             @ contribution of V for green
+.equ COEFF_U_BLUE,     454             @ contribution of U for blue
+
+@ Clamping constants 0x0 and 0xFF
+.equ COEFF_0,          0
+.equ COEFF_255,        255
+
+@ Bias coefficients for red, green and blue
+.equ COEFF_BIAS_R,   -45824            @ Red   bias =     -359*128 + 128
+.equ COEFF_BIAS_G,    34816            @ Green bias = (88+183)*128 + 128
+.equ COEFF_BIAS_B,   -57984            @ Blue  bias =     -454*128 + 128
+
+constants:
+    .hword (COEFF_V_RED),  (COEFF_U_GREEN), (COEFF_V_GREEN), (COEFF_U_BLUE) @   359  | -88   |  -183  | 454
+    .hword (COEFF_Y),      (COEFF_0),       (COEFF_255)    , (COEFF_0)      @   256  |   0   |   255  |  0
+    .word  (COEFF_BIAS_R), (COEFF_BIAS_G),  (COEFF_BIAS_B)                  @ -45824 | 34816 | -57984 |  X
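+
+@ Note (added for reference, not in the original source): in Q8 fixed point
+@ each channel works out to, e.g. for red,
+@     R = (256*Y + 359*(V - 128) + 128) >> 8
+@       = (256*Y + 359*V + COEFF_BIAS_R) >> 8
+@ i.e. the bias constants above fold the -128 chroma offset together with
+@ the +128 rounding term.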
+
+/*--------------------------------------------------------------------------
+* FUNCTION     : yvup2bgr888_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION  : Perform YVU planar to BGR888 conversion.
+*--------------------------------------------------------------------------
+* C PROTOTYPE  : void yvup2bgr888_venum(uint8_t  *p_y,
+*                                       uint8_t  *p_cr,
+*                                       uint8_t  *p_cb,
+*                                       uint8_t  *p_bgr888,
+*                                       uint32_t  length)
+*--------------------------------------------------------------------------
+* REG INPUT    : R0: uint8_t  *p_y
+*                      pointer to the input Y Line
+*                R1: uint8_t  *p_cr
+*                      pointer to the input Cr Line
+*                R2: uint8_t  *p_cb
+*                      pointer to the input Cb Line
+*                R3: uint8_t  *p_bgr888
+*                      pointer to the output BGR Line
+*                R12: uint32_t  length
+*                      width of Line
+*--------------------------------------------------------------------------
+* STACK ARG    : None
+*--------------------------------------------------------------------------
+* REG OUTPUT   : None
+*--------------------------------------------------------------------------
+* MEM INPUT    : p_y      - a line of Y pixels
+*                p_cr     - a line of Cr pixels
+*                p_cb     - a line of Cb pixels
+*                length   - the width of the input line
+*--------------------------------------------------------------------------
+* MEM OUTPUT   : p_bgr888 - the converted bgr pixels
+*--------------------------------------------------------------------------
+* REG AFFECTED : ARM:  R0-R4, R12
+*                NEON: Q0-Q15
+*--------------------------------------------------------------------------
+* STACK USAGE  : none
+*--------------------------------------------------------------------------
+* CYCLES       : none
+*
+*--------------------------------------------------------------------------
+* NOTES        :
+*--------------------------------------------------------------------------
+*/
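+
+/* Usage sketch (illustrative only, not part of the original source; the
+ * buffer names and 'width' are hypothetical). Each call converts one line
+ * of planar YVU -- one Cr/Cb sample per luma sample -- into packed 3-byte
+ * pixels:
+ *
+ *     uint8_t y[width], cr[width], cb[width], out[width * 3];
+ *     yvup2bgr888_venum(y, cr, cb, out, width);
+ */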
+.type yvup2bgr888_venum, %function
+yvup2bgr888_venum:
+
+    /*-------------------------------------------------------------------------
+     *  Store stack registers
+     * ------------------------------------------------------------------------ */
+    STMFD SP!, {LR}
+
+    VPUSH {D8-D15}
+
+    PLD [R0, R3]                      @ preload luma line
+
+    ADR   R12, constants
+
+    VLD1.S16  {D6, D7}, [R12]!        @ D6, D7: 359 | -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]       @ Q15   :  -45824   |    34816   |  -57984 |     X
+
+    /*-------------------------------------------------------------------------
+     *  Load the 5th parameter via stack
+     *  R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     *  parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #68]                 @ LR is pushed into the stack so SP is
+                                       @ decreased by 4,
+                                       @ D8-D15 are also pushed into the stack
+                                       @ so SP is decreased by
+                                       @ 8-byte/D-Register * 8 D-Registers = 64,
+                                       @ so SP needs to be increased by 64+4=68
+                                       @ to get the value that was first pushed
+                                       @ into stack (the 5th parameter passed in
+                                       @ through the stack)
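+                                       @ Resulting stack layout, for
+                                       @ reference (added note):
+                                       @   [SP, #0] - [SP, #63] : D8-D15
+                                       @   [SP, #64]            : saved LR
+                                       @   [SP, #68]            : 5th argument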
+
+    /*-------------------------------------------------------------------------
+     *  Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4,  D7[1]              @ Q4:  0  |  0  |  0  |  0  |  0  |  0  |  0  |  0
+    VDUP.S16  Q5,  D7[2]              @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     *  Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0,   D30[0]            @ Q0:  -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1,   D30[1]            @ Q1:   34816 |  34816 |  34816 |  34816
+    VDUP.S32  Q2,   D31[0]            @ Q2:  -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     *  The main loop
+     * ------------------------------------------------------------------------ */
+loop_yvup2bgr888:
+
+    /*-------------------------------------------------------------------------
+     *  Load input from Y, V and U
+     *  D12  : Y0  Y1  Y2  Y3  Y4  Y5  Y6  Y7
+     *  D14  : V0  V1  V2  V3  V4  V5  V6  V7
+     *  D15  : U0  U1  U2  U3  U4  U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VLD1.U8  {D12},  [p_y]!           @ Load 8 Luma elements (uint8) to D12
+    VLD1.U8  {D14},  [p_cr]!          @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15},  [p_cb]!          @ Load 8 Cb elements (uint8) to D15
+
+    /*-------------------------------------------------------------------------
+     *  Expand uint8 value to uint16
+     *  D18, D19: Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7
+     *  D20, D21: V0 V1 V2 V3 V4 V5 V6 V7
+     *  D22, D23: U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8 Q9,  D12
+    VMOVL.U8 Q10, D14
+    VMOVL.U8 Q11, D15
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q12, D20, D6[0]        @ Q12:  359*(V0,V1,V2,V3)     Red
+    VMULL.S16  Q13, D22, D6[1]        @ Q13:  -88*(U0,U1,U2,U3)     Green
+    VMLAL.S16  Q13, D20, D6[2]        @ Q13:  -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q14, D22, D6[3]        @ Q14:  454*(U0,U1,U2,U3)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0                 @ Q12 add Red   bias -45824
+    VADD.S32  Q13, Q1                 @ Q13 add Green bias  34816
+    VADD.S32  Q14, Q2                 @ Q14 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16  Q12, D18, D7[0]        @ Q12: R0, R1, R2, R3 in 32-bit Q8 format
+    VMLAL.S16  Q13, D18, D7[0]        @ Q13: G0, G1, G2, G3 in 32-bit Q8 format
+    VMLAL.S16  Q14, D18, D7[0]        @ Q14: B0, B1, B2, B3 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D18, Q12, #8          @ D18: R0, R1, R2, R3 in 16-bit Q0 format
+    VSHRN.S32   D20, Q13, #8          @ D20: G0, G1, G2, G3 in 16-bit Q0 format
+    VSHRN.S32   D22, Q14, #8          @ D22: B0, B1, B2, B3 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     *  Done with the first 4 elements, continue on the next 4 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q12, D21, D6[0]        @ Q12:  359*(V4,V5,V6,V7)     Red
+    VMULL.S16  Q13, D23, D6[1]        @ Q13:  -88*(U4,U5,U6,U7)     Green
+    VMLAL.S16  Q13, D21, D6[2]        @ Q13:  -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16  Q14, D23, D6[3]        @ Q14:  454*(U4,U5,U6,U7)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0                 @ Q12 add Red   bias -45824
+    VADD.S32  Q13, Q1                 @ Q13 add Green bias  34816
+    VADD.S32  Q14, Q2                 @ Q14 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16  Q12, D19, D7[0]        @ Q12: R4, R5, R6, R7 in 32-bit Q8 format
+    VMLAL.S16  Q13, D19, D7[0]        @ Q13: G4, G5, G6, G7 in 32-bit Q8 format
+    VMLAL.S16  Q14, D19, D7[0]        @ Q14: B4, B5, B6, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D19, Q12, #8          @ D19: R4, R5, R6, R7 in 16-bit Q0 format
+    VSHRN.S32   D21, Q13, #8          @ D21: G4, G5, G6, G7 in 16-bit Q0 format
+    VSHRN.S32   D23, Q14, #8          @ D23: B4, B5, B6, B7 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q11, Q11, Q4            @ if Q11 <   0, Q11 =   0
+    VMIN.S16  Q11, Q11, Q5            @ if Q11 > 255, Q11 = 255
+    VQMOVUN.S16   D28, Q11            @ store Blue to D28, narrow the value from int16 to int8
+
+    VMAX.S16  Q10, Q10, Q4            @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5            @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D27, Q10            @ store Green to D27, narrow the value from int16 to int8
+
+    VMAX.S16    Q9, Q9, Q4            @ if Q9 <   0, Q9 =   0
+    VMIN.S16    Q9, Q9, Q5            @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16    D26, Q9            @ store Red to D26, narrow the value from int16 to int8.
+
+    SUBS length, length, #8           @ check if the length is less than 8
+
+    BMI  trailing_yvup2bgr888         @ jump to trailing processing if remaining length is less than 8
+
+    VST3.U8  {D26,D27,D28}, [p_bgr]!  @ vector store Red, Green, Blue to destination
+                                      @ Red at LSB
+
+    BHI loop_yvup2bgr888              @ loop if more than 8 pixels left
+
+    BEQ  end_yvup2bgr888              @ done if exactly 8 pixels were processed in the loop
+
+
+trailing_yvup2bgr888:
+    /*-------------------------------------------------------------------------
+     *  There are 1 ~ 7 pixels left in the trailing part.
+     *  First add 7 to the length so the length is in the range 0 ~ 6,
+     *  e.g. 1 pixel left in the trailing part gives 1 - 8 + 7 = 0.
+     *  Then save 1 pixel unconditionally, since at least 1 pixel is left
+     *  in the trailing part.
+     * ------------------------------------------------------------------------ */
+    ADDS length, length, #7           @ there are 7 or fewer pixels in the trailing part
+
+    VST3.U8 {D26[0], D27[0], D28[0]}, [p_bgr]! @ at least 1 pixel left in the trailing part
+    BEQ  end_yvup2bgr888                       @ done if 0 pixel left
+
+    SUBS length, length, #1           @ update length counter
+    VST3.U8 {D26[1], D27[1], D28[1]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2bgr888                        @ done if 0 pixel left
+
+    SUBS length, length, #1           @ update length counter
+    VST3.U8 {D26[2], D27[2], D28[2]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2bgr888                        @ done if 0 pixel left
+
+    SUBS length, length, #1           @ update length counter
+    VST3.U8 {D26[3], D27[3], D28[3]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2bgr888                        @ done if 0 pixel left
+
+    SUBS length, length, #1           @ update length counter
+    VST3.U8 {D26[4], D27[4], D28[4]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2bgr888                        @ done if 0 pixel left
+
+    SUBS length, length, #1           @ update length counter
+    VST3.U8 {D26[5], D27[5], D28[5]}, [p_bgr]!  @ store one more pixel
+    BEQ  end_yvup2bgr888                        @ done if 0 pixel left
+
+    SUBS length, length, #1           @ update length counter
+    VST3.U8 {D26[6], D27[6], D28[6]}, [p_bgr]!  @ store one more pixel
+
+end_yvup2bgr888:
+    VPOP  {D8-D15}
+    LDMFD SP!, {PC}
+
+                                      @ end of yvup2bgr888
+
+
+/*-------------------------------------------------------------------------
+* FUNCTION     : yyvup2bgr888_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION  : Perform YYVU planar to BGR888 conversion.
+*--------------------------------------------------------------------------
+* C PROTOTYPE  : void yyvup2bgr888_venum(uint8_t  *p_y,
+*                                        uint8_t  *p_cr,
+*                                        uint8_t  *p_cb,
+*                                        uint8_t  *p_bgr888,
+*                                        uint32_t  length)
+*--------------------------------------------------------------------------
+* REG INPUT    : R0: uint8_t  *p_y
+*                      pointer to the input Y Line
+*                R1: uint8_t  *p_cr
+*                      pointer to the input Cr Line
+*                R2: uint8_t  *p_cb
+*                      pointer to the input Cb Line
+*                R3: uint8_t  *p_bgr888
+*                      pointer to the output BGR Line
+*                R12: uint32_t  length
+*                      width of Line
+*--------------------------------------------------------------------------
+* STACK ARG    : None
+*--------------------------------------------------------------------------
+* REG OUTPUT   : None
+*--------------------------------------------------------------------------
+* MEM INPUT    : p_y      - a line of Y pixels
+*                p_cr     - a line of Cr pixels
+*                p_cb     - a line of Cb pixels
+*                length   - the width of the input line
+*--------------------------------------------------------------------------
+* MEM OUTPUT   : p_bgr888 - the converted bgr pixels
+*--------------------------------------------------------------------------
+* REG AFFECTED : ARM:  R0-R4, R12
+*                NEON: Q0-Q15
+*--------------------------------------------------------------------------
+* STACK USAGE  : none
+*--------------------------------------------------------------------------
+* CYCLES       : none
+*
+*--------------------------------------------------------------------------
+* NOTES        :
+*--------------------------------------------------------------------------
+*/
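+
+/* Note (added, not in the original source): in the YYVU layout two luma
+ * samples share one Cr/Cb pair (4:2:2), so a line of 'length' pixels
+ * consumes 'length' bytes from p_y but only length/2 bytes each from
+ * p_cr and p_cb -- the loop below loads 16 Y against 8 V and 8 U per
+ * iteration. */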
+.type yyvup2bgr888_venum, %function
+yyvup2bgr888_venum:
+    /*-------------------------------------------------------------------------
+     *  Store stack registers
+     * ------------------------------------------------------------------------ */
+    STMFD SP!, {LR}
+
+    VPUSH {D8-D15}
+
+    PLD [R0, R3]                       @ preload luma line
+
+    ADR   R12, constants
+
+    VLD1.S16  {D6, D7}, [R12]!         @ D6, D7: 359 | -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]        @ Q15   :  -45824   |    34816   |  -57984 |     X
+
+    /*-------------------------------------------------------------------------
+     *  Load the 5th parameter via stack
+     *  R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     *  parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #68]                 @ LR is pushed into the stack so SP is
+                                       @ decreased by 4,
+                                       @ D8-D15 are also pushed into the stack
+                                       @ so SP is decreased by
+                                       @ 8-byte/D-Register * 8 D-Registers = 64,
+                                       @ so SP needs to be increased by 64+4=68
+                                       @ to get the value that was first pushed
+                                       @ into stack (the 5th parameter passed in
+                                       @ through the stack)
+
+    /*-------------------------------------------------------------------------
+     *  Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4,  D7[1]               @ Q4:  0  |  0  |  0  |  0  |  0  |  0  |  0  |  0
+    VDUP.S16  Q5,  D7[2]               @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     *  Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0,   D30[0]             @ Q0:  -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1,   D30[1]             @ Q1:   34816 |  34816 |  34816 |  34816
+    VDUP.S32  Q2,   D31[0]             @ Q2:  -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     *  The main loop
+     * ------------------------------------------------------------------------ */
+loop_yyvup2bgr888:
+
+    /*-------------------------------------------------------------------------
+     *  Load input from Y, V and U
+     *  D12, D13: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14, Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     *  D14  : V0  V1  V2  V3  V4  V5  V6  V7
+     *  D15  : U0  U1  U2  U3  U4  U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VLD2.U8  {D12,D13}, [p_y]!         @ Load 16 Luma elements (uint8) to D12, D13
+    VLD1.U8  {D14},  [p_cr]!           @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15},  [p_cb]!           @ Load 8 Cb elements (uint8) to D15
+
+    /*-------------------------------------------------------------------------
+     *  Expand uint8 value to uint16
+     *  D24, D25: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14
+     *  D26, D27: Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     *  D28, D29: V0 V1 V2 V3 V4 V5  V6  V7
+     *  D30, D31: U0 U1 U2 U3 U4 U5  U6  U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8 Q12, D12
+    VMOVL.U8 Q13, D13
+    VMOVL.U8 Q14, D14
+    VMOVL.U8 Q15, D15
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q6, D28, D6[0]          @ Q6:  359*(V0,V1,V2,V3)     Red
+    VMULL.S16  Q7, D30, D6[1]          @ Q7: -88*(U0,U1,U2,U3)     Green
+    VMLAL.S16  Q7, D28, D6[2]          @ Q7: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q8, D30, D6[3]          @ Q8:  454*(U0,U1,U2,U3)     Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0                   @ Q6 add Red   bias -45824
+    VADD.S32  Q7, Q1                   @ Q7 add Green bias  34816
+    VADD.S32  Q8, Q2                   @ Q8 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32   Q9, Q6
+    VMLAL.S16  Q6, D24, D7[0]          @ Q6: R0, R2, R4, R6 in 32-bit Q8 format
+    VMLAL.S16  Q9, D26, D7[0]          @ Q9: R1, R3, R5, R7 in 32-bit Q8 format
+
+    VMOV.S32   Q10, Q7
+    VMLAL.S16  Q7,  D24, D7[0]         @ Q7:  G0, G2, G4, G6 in 32-bit Q8 format
+    VMLAL.S16  Q10, D26, D7[0]         @ Q10: G1, G3, G5, G7 in 32-bit Q8 format
+
+    VMOV.S32   Q11, Q8
+    VMLAL.S16  Q8,  D24, D7[0]         @ Q8:  B0, B2, B4, B6 in 32-bit Q8 format
+    VMLAL.S16  Q11, D26, D7[0]         @ Q11: B1, B3, B5, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D12, Q6,  #8           @ D12: R0 R2 R4 R6 in 16-bit Q0 format
+    VSHRN.S32   D13, Q9,  #8           @ D13: R1 R3 R5 R7 in 16-bit Q0 format
+    VZIP.16     D12, D13               @ Q6 : R0 R1 R2 R3 R4 R5 R6 R7
+
+    VSHRN.S32   D18, Q7,  #8           @ D18: G0 G2 G4 G6 in 16-bit Q0 format
+    VSHRN.S32   D19, Q10, #8           @ D19: G1 G3 G5 G7 in 16-bit Q0 format
+    VZIP.16     D18, D19               @ Q9 : G0 G1 G2 G3 G4 G5 G6 G7
+
+    VSHRN.S32   D20, Q8,  #8           @ D20: B0 B2 B4 B6 in 16-bit Q0 format
+    VSHRN.S32   D21, Q11, #8           @ D21: B1 B3 B5 B7 in 16-bit Q0 format
+    VZIP.16     D20, D21               @ Q10: B0 B1 B2 B3 B4 B5 B6 B7
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q10, Q10, Q4             @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5             @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D23, Q10             @ store Blue to D23, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4               @ if Q9 <   0, Q9 =   0
+    VMIN.S16  Q9, Q9, Q5               @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D22, Q9               @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16  Q6, Q6, Q4               @ if Q6 <   0, Q6 =   0
+    VMIN.S16  Q6, Q6, Q5               @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16  D21, Q6               @ store Red to D21, narrow the value from int16 to int8
+
+    SUBS length, length, #8            @ check if the length is less than 8
+
+    BMI  trailing_yyvup2bgr888         @ jump to trailing processing if remaining length is less than 8
+
+    VST3.U8  {D21,D22,D23}, [p_bgr]!   @ vector store Blue, Green, Red to destination
+                                       @ Red at LSB
+
+    BEQ  end_yyvup2bgr888              @ done if exactly 8 pixels were processed in the loop
+
+    /*-------------------------------------------------------------------------
+     *  Done with the first 8 elements, continue on the next 8 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     *  Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q6, D29, D6[0]          @ Q6: 359*(V4,V5,V6,V7)       Red
+    VMULL.S16  Q7, D31, D6[1]          @ Q7: -88*(U4,U5,U6,U7)      Green
+    VMLAL.S16  Q7, D29, D6[2]          @ Q7: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16  Q8, D31, D6[3]          @ Q8: 454*(U4,U5,U6,U7)       Blue
+
+    /*-------------------------------------------------------------------------
+     *  Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0                   @ Q6 add Red   bias -45824
+    VADD.S32  Q7, Q1                   @ Q7 add Green bias  34816
+    VADD.S32  Q8, Q2                   @ Q8 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     *  Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32   Q9, Q6
+    VMLAL.S16  Q6, D25, D7[0]          @ Q6: R8 R10 R12 R14 in 32-bit Q8 format
+    VMLAL.S16  Q9, D27, D7[0]          @ Q9: R9 R11 R13 R15 in 32-bit Q8 format
+
+    VMOV.S32   Q10, Q7
+    VMLAL.S16  Q7,  D25, D7[0]         @ Q7:  G8, G10, G12, G14 in 32-bit Q8 format
+    VMLAL.S16  Q10, D27, D7[0]         @ Q10: G9, G11, G13, G15 in 32-bit Q8 format
+
+    VMOV.S32   Q11, Q8
+    VMLAL.S16  Q8,  D25, D7[0]         @ Q8:  B8, B10, B12, B14 in 32-bit Q8 format
+    VMLAL.S16  Q11, D27, D7[0]         @ Q11: B9, B11, B13, B15 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     *  Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32   D12, Q6,  #8           @ D12: R8 R10 R12 R14 in 16-bit Q0 format
+    VSHRN.S32   D13, Q9,  #8           @ D13: R9 R11 R13 R15 in 16-bit Q0 format
+    VZIP.16     D12, D13               @ Q6: R8 R9 R10 R11 R12 R13 R14 R15
+
+    VSHRN.S32   D18, Q7,  #8           @ D18: G8 G10 G12 G14 in 16-bit Q0 format
+    VSHRN.S32   D19, Q10, #8           @ D19: G9 G11 G13 G15 in 16-bit Q0 format
+    VZIP.16     D18, D19               @ Q9:  G8 G9 G10 G11 G12 G13 G14 G15
+
+    VSHRN.S32   D20, Q8,  #8           @ D20: B8 B10 B12 B14 in 16-bit Q0 format
+    VSHRN.S32   D21, Q11, #8           @ D21: B9 B11 B13 B15 in 16-bit Q0 format
+    VZIP.16     D20, D21               @ Q10: B8 B9 B10 B11 B12 B13 B14 B15
+
+    /*-------------------------------------------------------------------------
+     *  Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q10, Q10, Q4             @ if Q10 <   0, Q10 =   0
+    VMIN.S16  Q10, Q10, Q5             @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16   D23, Q10             @ store Blue to D23, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4               @ if Q9 <   0, Q9 =   0
+    VMIN.S16  Q9, Q9, Q5               @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D22, Q9               @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16  Q6, Q6, Q4               @ if Q6 <   0, Q6 =   0
+    VMIN.S16  Q6, Q6, Q5               @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16  D21, Q6               @ store Red to D21, narrow the value from int16 to int8
+
+
+    SUBS length, length, #8            @ check if the length is less than 8
+
+    BMI  trailing_yyvup2bgr888         @ jump to trailing processing if remaining length is less than 8
+
+    VST3.U8  {D21,D22,D23}, [p_bgr]!   @ vector store Blue, Green, Red to destination
+                                       @ Red at LSB
+
+    BHI loop_yyvup2bgr888              @ loop if more than 8 pixels left
+
+    BEQ  end_yyvup2bgr888              @ done if exactly 8 pixels were processed in the loop
+
+
+trailing_yyvup2bgr888:
+    /*-------------------------------------------------------------------------
+     *  There are 1 ~ 7 pixels left in the trailing part.
+     *  First add 7 to the length so the length is in the range 0 ~ 6,
+     *  e.g. 1 pixel left in the trailing part gives 1 - 8 + 7 = 0.
+     *  Then save 1 pixel unconditionally, since at least 1 pixel is left
+     *  in the trailing part.
+     * ------------------------------------------------------------------------ */
+    ADDS length, length, #7            @ there are 7 or fewer pixels in the trailing part
+
+    VST3.U8 {D21[0],D22[0],D23[0]}, [p_bgr]! @ at least 1 pixel left in the trailing part
+    BEQ end_yyvup2bgr888               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST3.U8 {D21[1],D22[1],D23[1]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2bgr888               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST3.U8 {D21[2],D22[2],D23[2]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2bgr888               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST3.U8 {D21[3],D22[3],D23[3]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2bgr888               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST3.U8 {D21[4],D22[4],D23[4]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2bgr888               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST3.U8 {D21[5],D22[5],D23[5]}, [p_bgr]!  @ store one more pixel
+    BEQ end_yyvup2bgr888               @ done if 0 pixel left
+
+    SUBS length, length, #1            @ update length counter
+    VST3.U8 {D21[6],D22[6],D23[6]}, [p_bgr]!  @ store one more pixel
+
+end_yyvup2bgr888:
+    VPOP  {D8-D15}
+    LDMFD SP!, {PC}
+
+                                       @ end of yyvup2bgr888
+
+.end
diff --git a/libjpegtwrp/asm/armv7/jdidct-armv7.S b/libjpegtwrp/asm/armv7/jdidct-armv7.S
new file mode 100644
index 0000000..d61e219
--- /dev/null
+++ b/libjpegtwrp/asm/armv7/jdidct-armv7.S
@@ -0,0 +1,762 @@
+/*=========================================================================
+* jdidct-armv7.S
+*
+*  Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+*
+*  Redistribution and use in source and binary forms, with or without
+*  modification, are permitted provided that the following conditions are
+*  met:
+*      * Redistributions of source code must retain the above copyright
+*        notice, this list of conditions and the following disclaimer.
+*      * Redistributions in binary form must reproduce the above
+*        copyright notice, this list of conditions and the following
+*        disclaimer in the documentation and/or other materials provided
+*        with the distribution.
+*      * Neither the name of Code Aurora Forum, Inc. nor the names of its
+*        contributors may be used to endorse or promote products derived
+*        from this software without specific prior written permission.
+*
+*  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+*  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+*  ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+*  BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+*  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+*  OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+*  IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*==========================================================================
+
+*==========================================================================
+*                         FUNCTION LIST
+*--------------------------------------------------------------------------
+* - idct_1x1_venum
+* - idct_2x2_venum
+* - idct_4x4_venum
+* - idct_8x8_venum
+*
+*==========================================================================
+*/
+
+@==========================================================================
+@ MACRO DEFINITION
+@==========================================================================
+    .macro Transpose8x8
+        @==================================================================
+        @ Transpose an 8 x 8 x 16 bit matrix in place
+        @ Input: q8 to q15
+        @ Output: q8 to q15
+        @ Registers used: q8 to q15
+        @ Assumptions: 8 x 8 x 16 bit data
+        @==================================================================
+
+        vswp d17, d24                  @q8, q12
+        vswp d23, d30                  @q11, q15
+        vswp d21, d28                  @q10, q14
+        vswp d19, d26                  @q9, q13
+
+        vtrn.32 q8,  q10
+        vtrn.32 q9,  q11
+        vtrn.32 q12, q14
+        vtrn.32 q13, q15
+
+        vtrn.16 q8,  q9
+        vtrn.16 q10, q11
+        vtrn.16 q12, q13
+        vtrn.16 q14, q15
+    .endm
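+    @ Note (added for clarity): the transpose works at three granularities --
+    @ the vswp's exchange the off-diagonal 4x4 sub-blocks (whole D registers),
+    @ the vtrn.32's transpose 2x2 blocks of 32-bit pairs, and the vtrn.16's
+    @ finish with the individual 16-bit elements.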
+
+    .macro IDCT1D
+        @==================================================================
+        @ One dimensional 64 element inverse DCT
+        @ Input: q8 to q15 loaded with data
+        @        q0 loaded with constants
+        @ Output: q8 to q15
+        @ Registers used: q0, q4 to q15
+        @ Assumptions: 16 bit data, first elements in least significant
+        @ halfwords
+        @==================================================================
+
+        @1st stage
+        vqrdmulh.s16 q4,  q15, d0[2]   @q4 = a1*vx7
+        vqrdmulh.s16 q5,  q9,  d0[2]   @q5 = a1*vx1
+        vqrdmulh.s16 q6,  q13, d0[3]   @q6 = a2*vx5
+        vqrdmulh.s16 q7,  q11, d1[1]   @q7 = ma2*vx3
+        vqrdmulh.s16 q2,  q14, d0[1]   @q2 = a0*vx6
+        vqrdmulh.s16 q3,  q10, d0[1]   @q3 = a0*vx2
+        vqadd.s16   q9,  q4,  q9       @q9 = t1 = a1*vx7 + vx1
+        vqsub.s16   q5,  q5,  q15      @q5 = t8 = a1*vx1 - vx7
+        vqadd.s16   q15, q6,  q11      @q15 = t7 = a2*vx5 + vx3
+        vqadd.s16   q11, q7,  q13      @q11 = t3 = ma2*vx3 + vx5
+
+        @2nd stage
+        vqadd.s16   q13, q8,  q12      @q13 = t5 = vx0 + vx4
+        vqsub.s16   q8,  q8,  q12      @q8 = t0 = vx0 - vx4
+        vqadd.s16   q10, q2,  q10      @q10 = t2 = a0*vx6 + vx2
+        vqsub.s16   q12, q3,  q14      @q12 = t4 = a0*vx2 - vx6
+        vqadd.s16   q14, q5,  q11      @q14 = t6 = t8 + t3
+        vqsub.s16   q11, q5,  q11      @q11 = t3 = t8 - t3
+        vqsub.s16   q5,  q9,  q15      @q5 = t8 = t1 - t7
+        vqadd.s16   q9,  q9,  q15      @q9 = t1 = t1 + t7
+
+        @3rd stage
+        vqadd.s16   q15, q13, q10      @q15 = t7 = t5 + t2
+        vqsub.s16   q10, q13, q10      @q10 = t2 = t5 - t2
+        vqadd.s16   q13, q8,  q12      @q13 = t5 = t0 + t4
+        vqsub.s16   q7,  q8,  q12      @q7 = t0 = t0 - t4
+        vqsub.s16   q12, q5,  q11      @q12 = t4 = t8 - t3
+        vqadd.s16   q11, q5,  q11      @q11 = t3 = t8 + t3
+
+        @4th stage
+        vqadd.s16   q8,  q15, q9       @q8 = vy0 = t7 + t1
+        vqsub.s16   q15, q15, q9       @q15 = vy7 = t7 - t1
+        vqrdmulh.s16 q6,  q12, d0[0]   @q6 = c4*t4
+        vqrdmulh.s16 q4,  q11, d0[0]   @q4 = c4*t3
+        vqsub.s16   q12, q10, q14      @q12 = vy4 = t2 - t6
+        vqadd.s16   q11, q10, q14      @q11 = vy3 = t2 + t6
+        vqadd.s16   q10, q7,  q6       @q10 = vy2 = t0 + c4*t4
+        vqsub.s16   q14, q13, q4       @q14 = vy6 = t5 - c4*t3
+        vqadd.s16   q9,  q13, q4       @q9 = vy1 = t5 + c4*t3
+        vqsub.s16   q13, q7,  q6       @q13 = vy5 = t0 - c4*t4
+    .endm
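+    @ Note (added): the four stages above appear to implement the standard
+    @ even/odd butterfly decomposition of the 8-point IDCT; the scalar
+    @ multipliers c4, a0, a1, a2 and ma2 named in the comments live in q0,
+    @ which IDCT_ENTRY loads from constants[0].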
+
+    .macro PART1
+        @==================================================================
+        @ Load input input data from memory and shift
+        @==================================================================
+        vld1.16   {d16, d17},[r0]!     @q8 =row0
+        vqshl.s16  q8,  q8,  #4        @saturating shift left by 4 for
+                                       @precision; max MPEG input is 2047/-2048
+        vld1.16   {d18, d19},[r0]!     @q9 =row1
+        vqshl.s16  q9,  q9,  #4
+
+        vld1.16   {d20, d21},[r0]!     @q10=row2
+        vqshl.s16  q10, q10, #4
+
+        vld1.16   {d22, d23},[r0]!     @q11=row3
+        vqshl.s16  q11, q11, #4
+
+        vld1.16   {d24, d25},[r0]!     @q12=row4
+        vqshl.s16  q12, q12, #4
+
+        vld1.16   {d26, d27},[r0]!     @q13=row5
+        vqshl.s16  q13, q13, #4
+        vld1.16   {d28, d29},[r0]!     @q14=row6
+        vqshl.s16  q14, q14, #4
+        vld1.16   {d30, d31},[r0]!     @q15=row7
+        vqshl.s16  q15, q15, #4
+
+        @==================================================================
+        @ refresh the constants that were clobbered last time through IDCT1D
+        @==================================================================
+        vld1.16   {d4, d5},[r7]        @q2 =constants[2]
+        vld1.16   {d6, d7},[r8]        @q3 =constants[3]
+        vld1.16   {d8, d9},[r9]        @q4 =constants[4]
+    .endm
+
+    .macro PART2
+        @==================================================================
+        @ Prescale the input
+        @==================================================================
+        vqrdmulh.s16 q12, q12, q1      @q12=row4 * constants[1] = vx4
+        vqrdmulh.s16 q15, q15, q2      @q15=row7 * constants[2] = vx7
+        vqrdmulh.s16 q9,  q9,  q2      @q9 =row1 * constants[2] = vx1
+        vqrdmulh.s16 q13, q13, q4      @q13=row5 * constants[4] = vx5
+        vqrdmulh.s16 q11, q11, q4      @q11=row3 * constants[4] = vx3
+        vqrdmulh.s16 q14, q14, q3      @q14=row6 * constants[3] = vx6
+        vqrdmulh.s16 q10, q10, q3      @q10=row2 * constants[3] = vx2
+        vqrdmulh.s16 q8,  q8,  q1      @q8 =row0 * constants[1] = vx0
+
+        @==================================================================
+        @ At this point, the input 8x8 x 16 bit coefficients are
+        @ transposed, prescaled, and loaded in q8 to q15
+        @ q0 loaded with scalar constants
+        @ Perform 1D IDCT
+        @==================================================================
+        IDCT1D                         @perform 1d idct
+
+        @==================================================================
+        @ Transpose the intermediate results to get ready for the vertical
+        @ transformation
+        @==================================================================
+        vswp d17, d24                  @q8, q12
+        vswp d23, d30                  @q11, q15
+        vswp d21, d28                  @q10, q14
+        vswp d19, d26                  @q9, q13
+
+        @==================================================================
+        @ Load the bias
+        @==================================================================
+        vdup.32 q4, d1[1]              @a cycle is saved by loading
+                                       @the bias at this point
+
+        @==================================================================
+        @ Finish the transposition
+        @==================================================================
+        vtrn.32 q8,  q10
+        vtrn.32 q9,  q11
+        vtrn.32 q12, q14
+        vtrn.32 q13, q15
+        vtrn.16 q8,  q9
+        vtrn.16 q10, q11
+        vtrn.16 q12, q13
+        vtrn.16 q14, q15
+
+        @==================================================================
+        @ Add bias
+        @==================================================================
+        vqadd.s16 q8, q8, q4
+
+        @==================================================================
+        @ IDCT 2nd half
+        @==================================================================
+        IDCT1D                         @perform 1d idct
+
+        @==================================================================
+        @ Scale and clamp the output to correct range and save to memory
+        @ 1. scale to 8 bits by right shift of 6
+        @ 2. clamp output to [0, 255] by min/max
+        @ 3. use multiple stores; each store saves one row of output.
+        @    The store queue size is 4, so do no more than 4 stores in sequence.
+        @==================================================================
+        ldr       r5, =constants+5*16  @constants[5],
+        vld1.16   d10, [r5]            @load clamping parameters
+        vdup.s16  q6,  d10[0]          @q6=[0000000000000000]
+        vdup.s16  q7,  d10[1]          @q7=[FFFFFFFFFFFFFFFF]
+
+        @Save the results
+        vshr.s16  q8,  q8,  #6         @q8 = vy0
+        vmax.s16  q8,  q8,  q6         @clamp >0
+        vmin.s16  q8,  q8,  q7         @clamp <255
+
+        vshr.s16  q9,  q9,  #6         @q9 = vy1
+        vmax.s16  q9,  q9,  q6         @clamp >0
+        vmin.s16  q9,  q9,  q7         @clamp <255
+
+        vshr.s16  q10, q10, #6         @q10 = vy2
+        vmax.s16  q10, q10, q6         @clamp >0
+        vmin.s16  q10, q10, q7         @clamp <255
+
+        vshr.s16  q11, q11, #6         @q11 = vy3
+        vmax.s16  q11, q11, q6         @clamp >0
+        vmin.s16  q11, q11, q7         @clamp <255
+
+        vst1.16  {d16, d17},[r1],r2    @q8 =row0
+        vst1.16  {d18, d19},[r1],r2    @q9 =row1
+        vst1.16  {d20, d21},[r1],r2    @q10=row2
+        vst1.16  {d22, d23},[r1],r2    @q11=row3
+
+        vshr.s16  q12, q12, #6         @q12 = vy4
+        vmax.s16  q12, q12, q6         @clamp >0
+        vmin.s16  q12, q12, q7         @clamp <255
+
+        vshr.s16  q13, q13, #6         @q13 = vy5
+        vmax.s16  q13, q13, q6         @clamp >0
+        vmin.s16  q13, q13, q7         @clamp <255
+
+        vshr.s16  q14, q14, #6         @q14 = vy6
+        vmax.s16  q14, q14, q6         @clamp >0
+        vmin.s16  q14, q14, q7         @clamp <255
+
+        vshr.s16  q15, q15, #6         @q15 = vy7
+        vmax.s16  q15, q15, q6         @clamp >0
+        vmin.s16  q15, q15, q7         @clamp <255
+
+        vst1.16  {d24, d25},[r1],r2    @q12=row4
+        vst1.16  {d26, d27},[r1],r2    @q13=row5
+        vst1.16  {d28, d29},[r1],r2    @q14=row6
+        vst1.16  {d30, d31},[r1]       @q15=row7
+    .endm
+
+    .macro BIG_BODY_TRANSPOSE_INPUT
+        @==================================================================
+        @ Main body of idct
+        @==================================================================
+        PART1
+        Transpose8x8
+        PART2
+    .endm
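+    @ Note (added for clarity): one 8x8 block therefore flows as PART1
+    @ (load and pre-shift the coefficients), Transpose8x8 (bring columns
+    @ into rows), then PART2 (prescale, row IDCT via IDCT1D, transpose
+    @ back, add bias, column IDCT via IDCT1D, and finally shift, clamp to
+    @ [0, 255] and store eight rows).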
+
+    .macro IDCT_ENTRY
+        @==================================================================
+        @ Load the locations of the constants
+        @==================================================================
+        ldr  r5,  =constants+0*16      @constants[0]
+        ldr  r6,  =constants+1*16      @constants[1]
+        ldr  r7,  =constants+2*16      @constants[2]
+        ldr  r8,  =constants+3*16      @constants[3]
+        ldr  r9,  =constants+4*16      @constants[4]
+
+        @==================================================================
+        @ Load the coefficients
+        @ only some input coefficients are loaded due to register constraints
+        @==================================================================
+        vld1.16   {d0, d1},[r5]        @q0 =constants[0] (scalars)
+        vld1.16   {d2, d3},[r6]        @q1 =constants[1]
+    .endm
+@==========================================================================
+@ END of MACRO DEFINITION
+@==========================================================================
+
+
+    .section idct_func, "x"            @ AREA
+    .text                              @ idct_func, CODE, READONLY
+    .align 2
+    .code 32                           @ CODE32
+
+@==========================================================================
+@ Main Routine
+@==========================================================================
+
+    .global idct_1x1_venum
+    .global idct_2x2_venum
+    .global idct_4x4_venum
+    .global idct_8x8_venum
+
+@==========================================================================
+@ FUNCTION     : idct_1x1_venum
+@--------------------------------------------------------------------------
+@ DESCRIPTION  : ARM optimization of one 1x1 block iDCT
+@--------------------------------------------------------------------------
+@ C PROTOTYPE  : void idct_1x1_venum(int16 * input,
+@                                    int16 * output,
+@                                    int32 stride)
+@--------------------------------------------------------------------------
+@ REG INPUT    : R0 pointer to input (int16)
+@                R1 pointer to output (int16)
+@                R2 block stride
+@--------------------------------------------------------------------------
+@ STACK ARG    : None
+@--------------------------------------------------------------------------
+@ MEM INPUT    : None
+@--------------------------------------------------------------------------
+@ REG OUTPUT   : None
+@--------------------------------------------------------------------------
+@ MEM OUTPUT   : None
+@--------------------------------------------------------------------------
+@ REG AFFECTED : R0 - R2
+@--------------------------------------------------------------------------
+@ STACK USAGE  : none
+@--------------------------------------------------------------------------
+@ CYCLES       : 17 cycles
+@--------------------------------------------------------------------------
+@ NOTES        :
+@ This idct_1x1_venum code was developed with ARM instruction set.
+@
+@ ARM REGISTER ALLOCATION
+@ =========================================================================
+@ r0  : pointer to input data
+@ r1  : pointer to output area
+@ r2  : stride in the output buffer
+@==========================================================================
+.type idct_1x1_venum, %function
+idct_1x1_venum:
+
+    ldrsh   r3, [r0]                   @ Load signed half word (int16)
+    ldr     r2, =1028                  @ 1028 = 4 + (128 << 3)
+                                       @ 4 for rounding, 128 for offset
+    add     r2, r3, r2
+    asrs    r2, r2, #3                 @ Divide by 8, and set status bit
+    movmi   r2, #0                     @ Clamp to be greater than 0
+    cmp     r2, #255
+    movgt   r2, #255                   @ Clamp to be less than 255
+    str     r2, [r1]                   @ Save output
+    bx      lr                         @ Return to caller
+
+                                       @ end of idct_1x1_venum
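+
+@ For reference, a scalar C sketch of what this routine computes (an
+@ illustration only, not project code; variable names are hypothetical):
+@
+@     int32 t = (int32)input[0] + 1028;  /* 1028 = 4 + (128 << 3) */
+@     t >>= 3;                           /* descale by 8           */
+@     if (t < 0)   t = 0;                /* clamp > 0              */
+@     if (t > 255) t = 255;              /* clamp < 255            */
+@     output[0] = (int16)t;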
+
+
+@==========================================================================
+@ FUNCTION     : idct_2x2_venum
+@--------------------------------------------------------------------------
+@ DESCRIPTION  : VeNum optimization of one 2x2 block iDCT
+@--------------------------------------------------------------------------
+@ C PROTOTYPE  : void idct_2x2_venum(int16 * input,
+@                                    int16 * output,
+@                                    int32 stride)
+@--------------------------------------------------------------------------
+@ REG INPUT    : R0 pointer to input (int16)
+@                R1 pointer to output (int16)
+@                R2 block stride
+@--------------------------------------------------------------------------
+@ STACK ARG    : None
+@--------------------------------------------------------------------------
+@ MEM INPUT    : None
+@--------------------------------------------------------------------------
+@ REG OUTPUT   : None
+@--------------------------------------------------------------------------
+@ MEM OUTPUT   : None
+@--------------------------------------------------------------------------
+@ REG AFFECTED : R0 - R2
+@--------------------------------------------------------------------------
+@ STACK USAGE  : none
+@--------------------------------------------------------------------------
+@ CYCLES       : 27 cycles
+@--------------------------------------------------------------------------
+@ NOTES        : Output buffer must be an 8x8 16-bit buffer
+@
+@ ARM REGISTER ALLOCATION
+@ ==========================================
+@ r0  : pointer to input data
+@ r1  : pointer to output area
+@ r2  : stride in the output buffer
+@ -------------------------------------------
+@
+@ VENUM REGISTER ALLOCATION
+@ =================================================
+@ q0     : output x0 - x4
+@ q1     : not used
+@ q2     : not used
+@ q3     : not used
+@ q4     : not used
+@ q5     : not used
+@ q6     : not used
+@ q7     : not used
+@ q8     : input y0 - y4
+@ q9     : intermediate value
+@ q10    : intermediate value
+@ q11    : offset value
+@ q12    : clamp value
+@ q13    : not used
+@ q14    : not used
+@ q15    : not used
+@==========================================================================
+.type idct_2x2_venum, %function
+idct_2x2_venum:
+
+    vld4.32    {d16, d17, d18, d19}, [r0]
+                                       @  d16: y0 | y1 | y2 | y3  (LSB | MSB)
+
+    vtrn.32    d16, d17                @  d16: y0 | y1 | X | X
+                                       @  d17: y2 | y3 | X | X
+
+    vqadd.s16  d18, d16, d17           @ d18: y0+y2 | y1+y3 | X | X   q: saturated
+    vqsub.s16  d19, d16, d17           @ d19: y0-y2 | y1-y3 | X | X   q: saturated
+
+    vtrn.16    d18, d19                @ d18: y0+y2 | y0-y2 | X | X
+                                       @ d19: y1+y3 | y1-y3 | X | X
+
+    vqadd.s16  d20, d18, d19           @ d20: (y0+y2)+(y1+y3) | (y0-y2)+(y1-y3)
+                                       @       x0 | x2 | X | X
+    vqsub.s16  d21, d18, d19           @ d21: (y0+y2)-(y1+y3) | (y0-y2)-(y1-y3)
+                                       @       x1 | x3 | X | X
+
+    vtrn.16    d20, d21                @ d20:  x0 | x1 | X | X
+                                       @ d21:  x2 | x3 | X | X
+
+    vrshr.s16  q10, q10, #3            @ Divide by 8 (rounding shift)
+
+    vmov.i16   q11, #128               @ q11 = 128|128|128|128|128|128|128|128
+    vqadd.s16  q0, q10, q11            @ Add offset to make output in [0,255]
+
+    vmov.i16   q12, #0                 @ q12 = eight halfwords of 0
+    vmov.i16   q13, #255               @ q13 = eight halfwords of 255 (0x00FF)
+
+    vmax.s16   q0, q0, q12             @ Clamp > 0
+    vmin.s16   q0, q0, q13             @ Clamp < 255
+
+    vstr       d0, [r1]                @ Store  x0 | x1 | X | X
+                                       @ Potential out of boundary issue
+    add        r1, r1, r2              @ Add the offset to the output pointer
+    vstr       d1, [r1]                @ Store  x2 | x3 | X | X
+                                       @ Potential out of boundary issue
+    bx         lr                      @ Return to caller
+
+                                       @ end of idct_2x2_venum
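+
+@ For reference, a scalar C sketch of the 2x2 butterfly above (an
+@ illustration only, not project code; it ignores the saturation that the
+@ vq* instructions provide):
+@
+@     int32 a = y0 + y2, b = y0 - y2;               /* first butterfly  */
+@     int32 c = y1 + y3, d = y1 - y3;
+@     int32 x[4] = { a + c, a - c, b + d, b - d };  /* second butterfly */
+@     for (int i = 0; i < 4; i++) {
+@         int32 t = ((x[i] + 4) >> 3) + 128;        /* round, /8, offset */
+@         out[i] = t < 0 ? 0 : (t > 255 ? 255 : t); /* clamp to [0,255]  */
+@     }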
+
+
+@==========================================================================
+@ FUNCTION     : idct_4x4_venum
+@--------------------------------------------------------------------------
+@ DESCRIPTION  : VeNum optimization of one 4x4 block iDCT
+@--------------------------------------------------------------------------
+@ C PROTOTYPE  : void idct_4x4_venum(int16 * input,
+@                                    int16 * output,
+@                                    int32 stride)
+@--------------------------------------------------------------------------
+@ REG INPUT    : R0 pointer to input (int16)
+@                R1 pointer to output (int16)
+@                R2 block stride
+@--------------------------------------------------------------------------
+@ STACK ARG    : None
+@--------------------------------------------------------------------------
+@ MEM INPUT    : None
+@--------------------------------------------------------------------------
+@ REG OUTPUT   : None
+@--------------------------------------------------------------------------
+@ MEM OUTPUT   : None
+@--------------------------------------------------------------------------
+@ REG AFFECTED : R0 - R3, R12
+@--------------------------------------------------------------------------
+@ STACK USAGE  : none
+@--------------------------------------------------------------------------
+@ CYCLES       : 56 cycles
+@--------------------------------------------------------------------------
+@ NOTES        :
+@
+@ ARM REGISTER ALLOCATION
+@ ==========================================
+@ r0  : pointer to input data
+@ r1  : pointer to output area
+@ r2  : stride in the output buffer
+@ r3  : pointer to the coefficient set
+@ r12 : pointer to the coefficient set
+@ -------------------------------------------
+@
+@ VENUM REGISTER ALLOCATION
+@ =================================================
+@ q0     : coefficients[0]
+@ q1     : coefficients[1]
+@ q2     : coefficients[2]
+@ q3     : coefficients[3]
+@ q4     : not used
+@ q5     : not used
+@ q6     : not used
+@ q7     : not used
+@ q8     : input y0 - y7
+@ q9     : input y8 - y15
+@ q10    : intermediate value
+@ q11    : intermediate value
+@ q12    : intermediate value
+@ q13    : intermediate value
+@ q14    : intermediate value
+@ q15    : intermediate value
+@==========================================================================
+.type idct_4x4_venum, %function
+idct_4x4_venum:
+
+        @ Load the locations of the first 2 sets of coefficients
+        ldr  r3,   =coefficient+0*16   @ coefficient[0]
+        ldr  r12,  =coefficient+1*16   @ coefficient[1]
+
+        @ Load the first 2 sets of coefficients
+        vld1.16  {d0, d1},[r3]         @ q0 = C4 | C2 | C4 | C6 | C4 | C2 | C4 | C6
+        vld1.16  {d2, d3},[r12]        @ q1 = C4 | C6 | C4 | C2 | C4 | C6 | C4 | C2
+
+        @ Load the locations of the second 2 sets of coefficients
+        ldr  r3,   =coefficient+2*16   @ coefficient[2]
+        ldr  r12,  =coefficient+3*16   @ coefficient[3]
+
+        @ Load the second 2 sets of coefficients
+        vld1.16  {d4, d5},[r3]         @ q2 = C4 | C4 | C4 | C4 | C2 | C2 | C2 | C2
+        vld1.16  {d6, d7},[r12]        @ q3 = C4 | C4 | C4 | C4 | C6 | C6 | C6 | C6
+
+        @ Load the input values
+        vld1.16  {d16}, [r0], r2       @ d16:   y0  | y1  | y2  | y3  (LSB | MSB)
+        vld1.16  {d17}, [r0], r2       @ d17:   y4  | y5  | y6  | y7  (LSB | MSB)
+        vld1.16  {d18}, [r0], r2       @ d18:   y8  | y9  | y10 | y11 (LSB | MSB)
+        vld1.16  {d19}, [r0], r2       @ d19:   y12 | y13 | y14 | y15 (LSB | MSB)
+
+        @ Apply iDCT Horizontally
+
+        @ q8: y0 |y1 |y2 |y3 |y4 |y5 |y6 |y7
+        @ q9: y8 |y9 |y10|y11|y12|y13|y14|y15
+
+        @======================================================================
+        @ vqrdmulh doubles the product and keeps the high 16 bits, which is
+        @ equivalent to a right shift by 15 bits. Since the coefficients are
+        @ in Q15 format, this cancels their scaling, so the final result is
+        @ in Q0 format.
+        @
+        @ vqrdmulh also rounds the result.
+        @======================================================================
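+        @ In scalar terms (an illustrative identity, not project code):
+        @     vqrdmulh(a, c) = sat16((2*a*c + 0x8000) >> 16)
+        @ so multiplying a Q0 sample by a Q15 coefficient yields
+        @ approximately round(a*c / 2^15), i.e. a Q0 result.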
+
+        vqrdmulh.s16  q10, q8, q0      @ q10: C4*y0  | C2*y1  | C4*y2  | C6*y3  | C4*y4  | C2*y5  | C4*y6  | C6*y7
+        vqrdmulh.s16  q11, q8, q1      @ q11: C4*y0  | C6*y1  | C4*y2  | C2*y3  | C4*y4  | C6*y5  | C4*y6  | C2*y7
+
+        vqrdmulh.s16  q12, q9, q0      @ q12: C4*y8  | C2*y9  | C4*y10 | C6*y11 | C4*y12 | C2*y13 | C4*y14 | C6*y15
+        vqrdmulh.s16  q13, q9, q1      @ q13: C4*y8  | C6*y9  | C4*y10 | C2*y11 | C4*y12 | C6*y13 | C4*y14 | C2*y15
+
+        vtrn.32       q10, q12         @ q10: C4*y0  | C2*y1  | C4*y8  | C2*y9  | C4*y4  | C2*y5  | C4*y12 | C2*y13
+                                       @ q12: C4*y2  | C6*y3  | C4*y10 | C6*y11 | C4*y6  | C6*y7  | C4*y14 | C6*y15
+
+        vtrn.32       q11, q13         @ q11: C4*y0  | C6*y1  | C4*y8  | C6*y9  | C4*y4  | C6*y5  | C4*y12 | C6*y13
+                                       @ q13: C4*y2  | C2*y3  | C4*y10 | C2*y11 | C4*y6  | C2*y7  | C4*y14 | C2*y15
+
+        vqadd.s16     q14, q10, q12    @ q14: C4*y0 + C4*y2 | C2*y1 + C6*y3 | C4*y8 + C4*y10 | C2*y9 + C6*y11 | C4*y4 + C4*y6 | C2*y5 + C6*y7 | C4*y12 + C4*y14 | C2*y13 + C6*y15
+                                       @       S0 | S2 | S8 | S10 | S4 | S6 | S12 | S14
+
+        vqsub.s16     q15, q11, q13    @ q15: C4*y0 - C4*y2 | C6*y1 - C2*y3 | C4*y8 - C4*y10 | C6*y9 - C2*y11 | C4*y4 - C4*y6 | C6*y5 - C2*y7 | C4*y12 - C4*y14 | C6*y13 - C2*y15
+                                       @       S1 | S3 | S9 | S11 | S5 | S7 | S13 | S15
+
+        vtrn.16       q14, q15         @ q14: S0 | S1 | S8  | S9  | S4 | S5 | S12 | S13
+                                       @ q15: S2 | S3 | S10 | S11 | S6 | S7 | S14 | S15
+
+        vqadd.s16     q8, q14, q15     @ q8:  Z0 | Z1 | Z8  | Z9  | Z4 | Z5 | Z12 | Z13
+        vqsub.s16     q9, q14, q15     @ q9:  Z3 | Z2 | Z11 | Z10 | Z7 | Z6 | Z15 | Z14
+        vrev32.16     q9, q9           @ q9:  Z2 | Z3 | Z10 | Z11 | Z6 | Z7 | Z14 | Z15
+
+
+        @ Apply iDCT Vertically
+
+        vtrn.32       q8, q9           @ q8:  Z0 | Z1 | Z2  | Z3  | Z4  | Z5  | Z6  | Z7
+                                       @ q9:  Z8 | Z9 | Z10 | Z11 | Z12 | Z13 | Z14 | Z15
+
+
+        vqrdmulh.s16  q10, q8, q2      @ q10: C4*Z0 | C4*Z1 | C4*Z2 | C4*Z3 | C2*Z4 | C2*Z5 | C2*Z6 | C2*Z7
+        vqrdmulh.s16  q11, q8, q3      @ q11: C4*Z0 | C4*Z1 | C4*Z2 | C4*Z3 | C6*Z4 | C6*Z5 | C6*Z6 | C6*Z7
+
+        vqrdmulh.s16  q12, q9, q2      @ q12: C4*Z8 | C4*Z9 | C4*Z10 | C4*Z11 | C2*Z12 | C2*Z13 | C2*Z14 | C2*Z15
+        vqrdmulh.s16  q13, q9, q3      @ q13: C4*Z8 | C4*Z9 | C4*Z10 | C4*Z11 | C6*Z12 | C6*Z13 | C6*Z14 | C6*Z15
+
+        vqadd.s16     q14, q10, q13    @ q14: C4*Z0+C4*Z8 | C4*Z1+C4*Z9 | C4*Z2+C4*Z10 | C4*Z3+C4*Z11 | C2*Z4+C6*Z12 | C2*Z5+C6*Z13 | C2*Z6+C6*Z14 | C2*Z7+C6*Z15
+                                       @      s0 | s4 | s8 | s12 | s2 | s6 | s10 | s14
+
+        vqsub.s16     q15, q11, q12    @ q15: C4*Z0-C4*Z8 | C4*Z1-C4*Z9 | C4*Z2-C4*Z10 | C4*Z3-C4*Z11 | C6*Z4-C2*Z12 | C6*Z5-C2*Z13 | C6*Z6-C2*Z14 | C6*Z7-C2*Z15
+                                       @      s1 | s5 | s9 | s13 | s3 | s7 | s11 | s15
+
+        vswp          d29, d30         @ q14: s0 | s4 | s8  | s12 | s1 | s5 | s9  | s13
+                                       @ q15: s2 | s6 | s10 | s14 | s3 | s7 | s11 | s15
+
+        vqadd.s16     q8, q14, q15     @ q8:  x0 | x4 | x8  | x12 | x1 | x5 | x9 | x13
+        vqsub.s16     q9, q14, q15     @ q9:  x3 | x7 | x11 | x15 | x2 | x6 | x10 | x14
+
+        vmov.i16      q10, #0          @ q10 = eight halfwords of 0
+        vmov.i16      q11, #255        @ q11 = eight halfwords of 255 (0x00FF)
+
+        vmov.i16      q0, #128         @ q0 = 128|128|128|128|128|128|128|128
+
+        vqadd.s16     q8, q8, q0       @ Add the offset
+        vqadd.s16     q9, q9, q0       @ Add the offset
+
+        vmax.s16      q8, q8, q10      @ clamp > 0
+        vmin.s16      q8, q8, q11      @ clamp < 255
+
+        vmax.s16      q9, q9, q10      @ clamp > 0
+        vmin.s16      q9, q9, q11      @ clamp < 255
+
+        vst1.16       {d16}, [r1], r2  @  d16:   x0 | x1  | x2  | x3  (LSB | MSB)
+        vst1.16       {d17}, [r1], r2  @  d17:   x4 | x5  | x6  | x7  (LSB | MSB)
+        vst1.16       {d19}, [r1], r2  @  d19:   x8 | x9  | x10 | x11 (LSB | MSB)
+        vst1.16       {d18}, [r1], r2  @  d18:   x12| x13 | x14 | x15 (LSB | MSB)
+
+        bx         lr                  @ Return to caller
+
+                                       @ end of idct_4x4_venum
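+
+@ For reference, one dimension of the 4-point iDCT above in scalar C (an
+@ illustration only, not project code; Ck denotes cos(k*pi/16), matching
+@ the coefficient table at the end of this file):
+@
+@     e0 = C4*y0 + C4*y2;   o0 = C2*y1 + C6*y3;   /* S0, S2 */
+@     e1 = C4*y0 - C4*y2;   o1 = C6*y1 - C2*y3;   /* S1, S3 */
+@     x0 = e0 + o0;   x1 = e1 + o1;               /* Z0, Z1 */
+@     x3 = e0 - o0;   x2 = e1 - o1;               /* Z3, Z2 */
+@
+@ The same butterfly is applied horizontally and then vertically, followed
+@ by the +128 offset and the [0, 255] clamp.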
+
+@==========================================================================
+@ FUNCTION     : idct_8x8_venum
+@--------------------------------------------------------------------------
+@ DESCRIPTION  : VeNum optimization of one 8x8 block iDCT
+@--------------------------------------------------------------------------
+@ C PROTOTYPE  : void idct_8x8_venum(int16 * input,
+@                                    int16 * output,
+@                                    int32 stride)
+@--------------------------------------------------------------------------
+@ REG INPUT    : R0 pointer to input (int16)
+@                R1 pointer to output (int16)
+@                R2 block stride
+@--------------------------------------------------------------------------
+@ STACK ARG    : None
+@--------------------------------------------------------------------------
+@ MEM INPUT    : None
+@--------------------------------------------------------------------------
+@ REG OUTPUT   : None
+@--------------------------------------------------------------------------
+@ MEM OUTPUT   : None
+@--------------------------------------------------------------------------
+@ REG AFFECTED : R0 - R9
+@--------------------------------------------------------------------------
+@ STACK USAGE  : none
+@--------------------------------------------------------------------------
+@ CYCLES       : 177 cycles
+@--------------------------------------------------------------------------
+@ NOTES        :
+@
+@ It was tested to be IEEE 1180 compliant. Since IEEE 1180 compliance is
+@ more stringent than MPEG-4 compliance, this version is also MPEG-4
+@ compliant.
+@
+@ CODE STRUCTURE:
+@ (i)   Macros for transposing an 8x8 matrix and for configuring the VFP unit are defined.
+@ (ii)  Macro for IDCT in one dimension is defined as four stages
+@ (iii) The two dimensional code begins
+@ (iv)  Constants are defined in the area DataArea
+@
+@ PROGRAM FLOW:
+@
+@ The VFP is configured
+@ The parameters to IDCT are loaded
+@    The coefficients are loaded
+@ loop:
+@    decrement loop counter
+@    The first input matrix is loaded
+@    The input is pre-scaled using the constants
+@    IDCT is performed in one dimension on the 8 columns
+@    The matrix is transposed
+@    A bias is loaded and added to the matrix
+@    IDCT is performed in one dimension on the 8 rows
+@    The matrix is post-scaled
+@    The matrix is saved
+@    Test loop counter and loop if greater than zero
+@ stop
+@
+@
+@ ARM REGISTER ALLOCATION
+@ ==========================================
+@ r0 : pointer to input data
+@ r1 : pointer to output area
+@ r2 : stride in the output buffer
+@ r3 :
+@ r4 :
+@ r5 : pointer to constants[0]
+@ r6 : pointer to constants[1]
+@ r7 : pointer to constants[2]
+@ r8 : pointer to constants[3]
+@ r9 : pointer to constants[4]
+@ -------------------------------------------
+@
+@ VENUM REGISTER ALLOCATION
+@ =================================================
+@ q0     : constants[0]
+@ q1     : constants[1]
+@ q2     : constants[2], IDCT1D in-place scratch
+@ q3     : constants[3], IDCT1D in-place scratch
+@ q4     : constants[4], IDCT1D in-place scratch, and bias compensation
+@ q5     :               IDCT1D in-place scratch
+@ q6     :               IDCT1D in-place scratch
+@ q7     :               IDCT1D in-place scratch
+@ q8     : Matrix[0]     IDCT1D in-place scratch
+@ q9     : Matrix[1]     IDCT1D in-place scratch
+@ q10    : Matrix[2]     IDCT1D in-place scratch
+@ q11    : Matrix[3]     IDCT1D in-place scratch
+@ q12    : Matrix[4]     IDCT1D in-place scratch
+@ q13    : Matrix[5]     IDCT1D in-place scratch
+@ q14    : Matrix[6]     IDCT1D in-place scratch
+@ q15    : Matrix[7]     IDCT1D in-place scratch
+@==========================================================================
+.type idct_8x8_venum, %function
+idct_8x8_venum:
+
+        push {r5-r9}
+        vpush {d8-d15}
+        IDCT_ENTRY
+        BIG_BODY_TRANSPOSE_INPUT
+        vpop {d8-d15}
+        pop  {r5-r9}
+        bx   lr
+                                       @ end of idct_8x8_venum
+
+@==========================================================================
+@ Constants Definition AREA: define idct kernel, bias
+@==========================================================================
+    .section ro_data_area              @ AREA  RODataArea
+    .data                              @ DATA, READONLY
+    .align 5                           @ ALIGN=5
+
+constants:
+        .hword  23170, 13573, 6518,  21895, -23170, -21895, 8223,  8224
+        .hword  16384, 22725, 21407, 19266, 16384,  19266,  21407, 22725
+        .hword  22725, 31521, 29692, 26722, 22725,  26722,  29692, 31521
+        .hword  21407, 29692, 27969, 25172, 21407,  25172,  27969, 29692
+        .hword  19266, 26722, 25172, 22654, 19266,  22654,  25172, 26722
+        .hword      0,   255,     0,     0
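+
+@ (For reference, and as a reading of the values rather than documented
+@ fact: in row 0, 23170 is cos(pi/4) in Q15, and 13573, 6518, 21895 match
+@ tan(2*pi/16), tan(pi/16), tan(3*pi/16) in Q15; 8223/8224 appear to be
+@ the rounding/offset bias (128 << 6) + 32 consumed by the final >> 6.
+@ Rows 1-4 appear to be AAN-style prescale factors of the form
+@ round(2^14 * sqrt(2)*cos(i*pi/16) * sqrt(2)*cos(j*pi/16)).)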
+
+coefficient:                           @ These are the coefficients used by the 4x4 iDCT, in Q15 format
+        .hword 11585, 15137,  11585,  6270, 11585, 15137,  11585,  6270  @ C4, C2, C4, C6, C4, C2, C4, C6 /2
+        .hword 11585,  6270,  11585, 15137, 11585,  6270,  11585, 15137  @ C4, C6, C4, C2, C4, C6, C4, C2 /2
+        .hword 11585, 11585,  11585, 11585, 15137, 15137,  15137, 15137  @ C4, C4, C4, C4, C2, C2, C2, C2 /2
+        .hword 11585, 11585,  11585, 11585,  6270,  6270,   6270,  6270  @ C4, C4, C4, C4, C6, C6, C6, C6 /2
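+
+@ (For reference: 11585 = round(2^14 * cos(4*pi/16)), 15137 = round(2^14 *
+@ cos(2*pi/16)), 6270 = round(2^14 * cos(6*pi/16)); storing Ck/2 in Q15 is
+@ what the "/2" in the comments above refers to.)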
+
+.end