diff -Nru android-platform-art-8.1.0+r23/debian/changelog android-platform-art-8.1.0+r23/debian/changelog
--- android-platform-art-8.1.0+r23/debian/changelog	2018-12-13 09:11:09.000000000 +0000
+++ android-platform-art-8.1.0+r23/debian/changelog	2019-03-19 01:46:21.000000000 +0000
@@ -1,3 +1,22 @@
+android-platform-art (8.1.0+r23-3+rpi2) buster-staging; urgency=medium
+
+  * Revert changes from 8.1.0+r23-3+rpi1
+  * Replace ubfx and sbfx with shift instructions
+  * Replace movw with mov and orr
+  * Failed attempt to avoid tagging asm files as armv7
+    (I'm just going to let this one through despite the apparent
+    armv7 content, I suspect that is a lesser evil than not having
+    the package at all).
+
+ -- Peter Michael Green <plugwash@raspbian.org>  Tue, 19 Mar 2019 01:46:21 +0000
+
+android-platform-art (8.1.0+r23-3+rpi1) buster-staging; urgency=medium
+
+  * Don't build android libs (but do build dmtracedump) on armhf for raspbian,
+    they apparently contain assembler that is not armv6 compatible.
+
+ -- Peter Michael Green <plugwash@raspbian.org>  Tue, 19 Mar 2019 01:31:47 +0000
+
 android-platform-art (8.1.0+r23-3) unstable; urgency=medium
 
   * Put all LDFLAGS that link libraries at the end of the command line. This
diff -Nru android-platform-art-8.1.0+r23/debian/patches/dont-tag-asm-as-armv7.patch android-platform-art-8.1.0+r23/debian/patches/dont-tag-asm-as-armv7.patch
--- android-platform-art-8.1.0+r23/debian/patches/dont-tag-asm-as-armv7.patch	1970-01-01 00:00:00.000000000 +0000
+++ android-platform-art-8.1.0+r23/debian/patches/dont-tag-asm-as-armv7.patch	2019-03-19 01:46:21.000000000 +0000
@@ -0,0 +1,14 @@
+Description: Don't tag asm files as armv7
+Author: Peter Michael Green <plugwash@raspbian.org>
+
+--- android-platform-art-8.1.0+r23.orig/runtime/arch/arm/asm_support_arm.S
++++ android-platform-art-8.1.0+r23/runtime/arch/arm/asm_support_arm.S
+@@ -34,7 +34,7 @@
+ #endif
+ 
+ .syntax unified
+-.arch armv7-a
++.arch armv6
+ .thumb
+ 
+ // Macro to generate the value of Runtime::Current into rDest. As it uses labels
diff -Nru android-platform-art-8.1.0+r23/debian/patches/hack-out-ubfx-and-sbfx.patch android-platform-art-8.1.0+r23/debian/patches/hack-out-ubfx-and-sbfx.patch
--- android-platform-art-8.1.0+r23/debian/patches/hack-out-ubfx-and-sbfx.patch	1970-01-01 00:00:00.000000000 +0000
+++ android-platform-art-8.1.0+r23/debian/patches/hack-out-ubfx-and-sbfx.patch	2019-03-19 01:46:21.000000000 +0000
@@ -0,0 +1,1344 @@
+Description: hack out ubfx and sbfx which are not supported on armv6
+ I wrote a script to replace ubfx and sbfx with shift operations, 
+ unfortunately these clobber the carry flag, so there is some risk,
+ hopefully it's ok.....
+Author: Peter Michael Green <plugwash@raspbian.org>
+
+---
+The information above should follow the Patch Tagging Guidelines, please
+check out http://dep.debian.net/deps/dep3/ to learn about the format. Here
+are templates for supplementary fields that you might want to add:
+
+Origin: <vendor|upstream|other>, <url of original patch>
+Bug: <url in upstream bugtracker>
+Bug-Debian: https://bugs.debian.org/<bugnumber>
+Bug-Ubuntu: https://launchpad.net/bugs/<bugnumber>
+Forwarded: <no|not-needed|url proving that it has been forwarded>
+Reviewed-By: <name and email of someone who approved the patch>
+Last-Update: 2019-03-19
+
+Index: android-platform-art-8.1.0+r23/runtime/interpreter/mterp/out/mterp_arm.S
+===================================================================
+--- android-platform-art-8.1.0+r23.orig/runtime/interpreter/mterp/out/mterp_arm.S
++++ android-platform-art-8.1.0+r23/runtime/interpreter/mterp/out/mterp_arm.S
+@@ -416,7 +416,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     /* for move, move-object, long-to-int */
+     /* op vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B from 15:12
+-    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     GET_VREG r2, r1                     @ r2<- fp[B]
+     GET_INST_OPCODE ip                  @ ip<- opcode from rINST
+@@ -470,7 +473,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     /* move-wide vA, vB */
+     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+     VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[A]
+     ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
+@@ -522,7 +528,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     /* for move, move-object, long-to-int */
+     /* op vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B from 15:12
+-    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     GET_VREG r2, r1                     @ r2<- fp[B]
+     GET_INST_OPCODE ip                  @ ip<- opcode from rINST
+@@ -723,8 +732,14 @@ artMterpAsmInstructionStart = .L_op_nop
+ .L_op_const_4: /* 0x12 */
+ /* File: arm/op_const_4.S */
+     /* const/4 vA, #+B */
+-    sbfx    r1, rINST, #12, #4          @ r1<- sssssssB (sign-extended)
+-    ubfx    r0, rINST, #8, #4           @ r0<- A
++    @ begin replacement of sbfx    r1, rINST, #12, #4          @ r1<- sssssssB (sign-extended)
++    lsl r1, rINST, #16
++    asr r1, r1, #28
++    @ end replacement of sbfx    r1, rINST, #12, #4          @ r1<- sssssssB (sign-extended)
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     GET_INST_OPCODE ip                  @ ip<- opcode from rINST
+     SET_VREG r1, r0                     @ fp[A]<- r1
+@@ -975,7 +990,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov       r3, rSELF                 @ r3<- self
+     bl        MterpInstanceOf           @ (index, &obj, method, self)
+     ldr       r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx      r9, rINST, #8, #4         @ r9<- A
++    @ begin replacement of ubfx      r9, rINST, #8, #4         @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx      r9, rINST, #8, #4         @ r9<- A
+     PREFETCH_INST 2
+     cmp       r1, #0                    @ exception pending?
+     bne       MterpException
+@@ -992,7 +1010,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      * Return the length of an array.
+      */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     GET_VREG r0, r1                     @ r0<- vB (object ref)
+     cmp     r0, #0                      @ is object null?
+     beq     common_errNullObject        @ yup, fail
+@@ -1139,7 +1160,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      * double to get a byte offset.
+      */
+     /* goto +AA */
+-    sbfx    rINST, rINST, #8, #8           @ rINST<- ssssssAA (sign-extended)
++    @ begin replacement of sbfx    rINST, rINST, #8, #8           @ rINST<- ssssssAA (sign-extended)
++    lsl rINST, rINST, #16
++    asr rINST, rINST, #24
++    @ end replacement of sbfx    rINST, rINST, #8, #8           @ rINST<- ssssssAA (sign-extended)
+     b       MterpCommonTakenBranchNoFlags
+ 
+ /* ------------------------------ */
+@@ -1424,7 +1448,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* if-cmp vA, vB, +CCCC */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    r0, rINST, #8, #4           @ r0<- A
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
+     GET_VREG r3, r1                     @ r3<- vB
+     GET_VREG r0, r0                     @ r0<- vA
+     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
+@@ -1450,7 +1477,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* if-cmp vA, vB, +CCCC */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    r0, rINST, #8, #4           @ r0<- A
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
+     GET_VREG r3, r1                     @ r3<- vB
+     GET_VREG r0, r0                     @ r0<- vA
+     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
+@@ -1476,7 +1506,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* if-cmp vA, vB, +CCCC */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    r0, rINST, #8, #4           @ r0<- A
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
+     GET_VREG r3, r1                     @ r3<- vB
+     GET_VREG r0, r0                     @ r0<- vA
+     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
+@@ -1502,7 +1535,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* if-cmp vA, vB, +CCCC */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    r0, rINST, #8, #4           @ r0<- A
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
+     GET_VREG r3, r1                     @ r3<- vB
+     GET_VREG r0, r0                     @ r0<- vA
+     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
+@@ -1528,7 +1564,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* if-cmp vA, vB, +CCCC */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    r0, rINST, #8, #4           @ r0<- A
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
+     GET_VREG r3, r1                     @ r3<- vB
+     GET_VREG r0, r0                     @ r0<- vA
+     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
+@@ -1554,7 +1593,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* if-cmp vA, vB, +CCCC */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    r0, rINST, #8, #4           @ r0<- A
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
+     GET_VREG r3, r1                     @ r3<- vB
+     GET_VREG r0, r0                     @ r0<- vA
+     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
+@@ -2244,7 +2286,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov      r3, rSELF                     @ r3<- self
+     bl       artGet32InstanceFromCode
+     ldr      r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx     r2, rINST, #8, #4             @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
+     PREFETCH_INST 2
+     cmp      r3, #0
+     bne      MterpPossibleException        @ bail out
+@@ -2274,7 +2319,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov      r3, rSELF                     @ r3<- self
+     bl       artGet64InstanceFromCode
+     ldr      r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx     r2, rINST, #8, #4             @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
+     PREFETCH_INST 2
+     cmp      r3, #0
+     bne      MterpException                @ bail out
+@@ -2303,7 +2351,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov      r3, rSELF                     @ r3<- self
+     bl       artGetObjInstanceFromCode
+     ldr      r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx     r2, rINST, #8, #4             @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
+     PREFETCH_INST 2
+     cmp      r3, #0
+     bne      MterpPossibleException        @ bail out
+@@ -2335,7 +2386,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov      r3, rSELF                     @ r3<- self
+     bl       artGetBooleanInstanceFromCode
+     ldr      r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx     r2, rINST, #8, #4             @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
+     PREFETCH_INST 2
+     cmp      r3, #0
+     bne      MterpPossibleException        @ bail out
+@@ -2367,7 +2421,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov      r3, rSELF                     @ r3<- self
+     bl       artGetByteInstanceFromCode
+     ldr      r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx     r2, rINST, #8, #4             @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
+     PREFETCH_INST 2
+     cmp      r3, #0
+     bne      MterpPossibleException        @ bail out
+@@ -2399,7 +2456,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov      r3, rSELF                     @ r3<- self
+     bl       artGetCharInstanceFromCode
+     ldr      r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx     r2, rINST, #8, #4             @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
+     PREFETCH_INST 2
+     cmp      r3, #0
+     bne      MterpPossibleException        @ bail out
+@@ -2431,7 +2491,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov      r3, rSELF                     @ r3<- self
+     bl       artGetShortInstanceFromCode
+     ldr      r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx     r2, rINST, #8, #4             @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4             @ r2<- A
+     PREFETCH_INST 2
+     cmp      r3, #0
+     bne      MterpPossibleException        @ bail out
+@@ -2460,7 +2523,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     FETCH    r0, 1                      @ r0<- field ref CCCC
+     mov      r1, rINST, lsr #12         @ r1<- B
+     GET_VREG r1, r1                     @ r1<- fp[B], the object pointer
+-    ubfx     r2, rINST, #8, #4          @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
+     GET_VREG r2, r2                     @ r2<- fp[A]
+     ldr      r3, [rFP, #OFF_FP_METHOD]  @ r3<- referrer
+     PREFETCH_INST 2
+@@ -2481,7 +2547,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     FETCH    r0, 1                      @ r0<- field ref CCCC
+     mov      r1, rINST, lsr #12         @ r1<- B
+     GET_VREG r1, r1                     @ r1<- fp[B], the object pointer
+-    ubfx     r2, rINST, #8, #4          @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
+     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[A]
+     ldr      r3, [rFP, #OFF_FP_METHOD]  @ r3<- referrer
+     PREFETCH_INST 2
+@@ -2524,7 +2593,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     FETCH    r0, 1                      @ r0<- field ref CCCC
+     mov      r1, rINST, lsr #12         @ r1<- B
+     GET_VREG r1, r1                     @ r1<- fp[B], the object pointer
+-    ubfx     r2, rINST, #8, #4          @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
+     GET_VREG r2, r2                     @ r2<- fp[A]
+     ldr      r3, [rFP, #OFF_FP_METHOD]  @ r3<- referrer
+     PREFETCH_INST 2
+@@ -2552,7 +2624,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     FETCH    r0, 1                      @ r0<- field ref CCCC
+     mov      r1, rINST, lsr #12         @ r1<- B
+     GET_VREG r1, r1                     @ r1<- fp[B], the object pointer
+-    ubfx     r2, rINST, #8, #4          @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
+     GET_VREG r2, r2                     @ r2<- fp[A]
+     ldr      r3, [rFP, #OFF_FP_METHOD]  @ r3<- referrer
+     PREFETCH_INST 2
+@@ -2580,7 +2655,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     FETCH    r0, 1                      @ r0<- field ref CCCC
+     mov      r1, rINST, lsr #12         @ r1<- B
+     GET_VREG r1, r1                     @ r1<- fp[B], the object pointer
+-    ubfx     r2, rINST, #8, #4          @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
+     GET_VREG r2, r2                     @ r2<- fp[A]
+     ldr      r3, [rFP, #OFF_FP_METHOD]  @ r3<- referrer
+     PREFETCH_INST 2
+@@ -2608,7 +2686,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     FETCH    r0, 1                      @ r0<- field ref CCCC
+     mov      r1, rINST, lsr #12         @ r1<- B
+     GET_VREG r1, r1                     @ r1<- fp[B], the object pointer
+-    ubfx     r2, rINST, #8, #4          @ r2<- A
++    @ begin replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx     r2, rINST, #8, #4          @ r2<- A
+     GET_VREG r2, r2                     @ r2<- fp[A]
+     ldr      r3, [rFP, #OFF_FP_METHOD]  @ r3<- referrer
+     PREFETCH_INST 2
+@@ -3362,7 +3443,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r3                     @ r0<- vB
+                                @ optional op; may set condition codes
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+@@ -3388,7 +3472,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r3                     @ r0<- vB
+                                @ optional op; may set condition codes
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+@@ -3413,7 +3500,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+@@ -3441,7 +3531,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+@@ -3470,7 +3563,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r3                     @ r0<- vB
+                                @ optional op; may set condition codes
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+@@ -3495,7 +3591,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+@@ -3523,7 +3622,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     GET_VREG r0, r3                     @ r0<- vB
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+                                @ optional op; may set condition codes
+@@ -3551,7 +3653,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov     r3, rINST, lsr #12          @ r3<- B
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     flds    s0, [r3]                    @ s0<- vB
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     fsitos  s1, s0                              @ s1<- op
+     GET_INST_OPCODE ip                  @ extract opcode from rINST
+@@ -3575,7 +3680,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     mov     r3, rINST, lsr #12          @ r3<- B
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     flds    s0, [r3]                    @ s0<- vB
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     fsitod  d0, s0                              @ d0<- op
+     CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
+@@ -3594,7 +3702,10 @@ artMterpAsmInstructionStart = .L_op_nop
+     /* for move, move-object, long-to-int */
+     /* op vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B from 15:12
+-    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     GET_VREG r2, r1                     @ r2<- fp[B]
+     GET_INST_OPCODE ip                  @ ip<- opcode from rINST
+@@ -3623,7 +3734,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+     ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+@@ -3647,7 +3761,10 @@ artMterpAsmInstructionStart = .L_op_nop
+      * For: long-to-double
+      */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
+     vldr    d0, [r3]                    @ d0<- vAA
+@@ -3681,7 +3798,10 @@ constvalop_long_to_double:
+     mov     r3, rINST, lsr #12          @ r3<- B
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     flds    s0, [r3]                    @ s0<- vB
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     ftosizs s1, s0                              @ s1<- op
+     GET_INST_OPCODE ip                  @ extract opcode from rINST
+@@ -3704,7 +3824,10 @@ constvalop_long_to_double:
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     GET_VREG r0, r3                     @ r0<- vB
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+                                @ optional op; may set condition codes
+@@ -3733,7 +3856,10 @@ constvalop_long_to_double:
+     mov     r3, rINST, lsr #12          @ r3<- B
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     flds    s0, [r3]                    @ s0<- vB
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     vcvt.f64.f32  d0, s0                              @ d0<- op
+     CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
+@@ -3758,7 +3884,10 @@ constvalop_long_to_double:
+     mov     r3, rINST, lsr #12          @ r3<- B
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     fldd    d0, [r3]                    @ d0<- vB
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     ftosizd  s0, d0                              @ s0<- op
+     GET_INST_OPCODE ip                  @ extract opcode from rINST
+@@ -3781,7 +3910,10 @@ constvalop_long_to_double:
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+@@ -3811,7 +3943,10 @@ constvalop_long_to_double:
+     mov     r3, rINST, lsr #12          @ r3<- B
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     fldd    d0, [r3]                    @ d0<- vB
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+     vcvt.f32.f64  s0, d0                              @ s0<- op
+     GET_INST_OPCODE ip                  @ extract opcode from rINST
+@@ -3835,7 +3970,10 @@ constvalop_long_to_double:
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r3                     @ r0<- vB
+                                @ optional op; may set condition codes
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+@@ -3861,7 +3999,10 @@ constvalop_long_to_double:
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r3                     @ r0<- vB
+                                @ optional op; may set condition codes
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+@@ -3887,7 +4028,10 @@ constvalop_long_to_double:
+      */
+     /* unop vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r3                     @ r0<- vB
+                                @ optional op; may set condition codes
+     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
+@@ -5133,7 +5277,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5171,7 +5318,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5210,7 +5360,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5242,7 +5395,10 @@ constvalop_long_to_double:
+      *
+      */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     cmp     r1, #0                      @ is second operand zero?
+@@ -5277,7 +5433,10 @@ constvalop_long_to_double:
+      *
+      */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     cmp     r1, #0                      @ is second operand zero?
+@@ -5317,7 +5476,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5355,7 +5517,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5393,7 +5558,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5431,7 +5599,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5469,7 +5640,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5507,7 +5681,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -5545,7 +5722,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -5585,7 +5765,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -5618,7 +5801,10 @@ constvalop_long_to_double:
+      */
+     /* mul-long/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR rINST, r9        @ rINST<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -5654,7 +5840,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -5695,7 +5884,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -5735,7 +5927,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -5775,7 +5970,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -5815,7 +6013,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -5844,7 +6045,10 @@ constvalop_long_to_double:
+      */
+     /* shl-long/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r2, r3                     @ r2<- vB
+     CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
+@@ -5871,7 +6075,10 @@ constvalop_long_to_double:
+      */
+     /* shr-long/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r2, r3                     @ r2<- vB
+     CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
+@@ -5898,7 +6105,10 @@ constvalop_long_to_double:
+      */
+     /* ushr-long/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r2, r3                     @ r2<- vB
+     CLEAR_SHADOW_PAIR r9, lr, ip        @ Zero out the shadow regs
+     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[A]
+@@ -5929,7 +6139,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
+     flds    s1, [r3]                    @ s1<- vB
+@@ -5955,7 +6168,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
+     flds    s1, [r3]                    @ s1<- vB
+@@ -5981,7 +6197,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
+     flds    s1, [r3]                    @ s1<- vB
+@@ -6007,7 +6226,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
+     flds    s1, [r3]                    @ s1<- vB
+@@ -6041,7 +6263,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r1, r3                     @ r1<- vB
+     GET_VREG r0, r9                     @ r0<- vA
+     .if 0
+@@ -6073,7 +6298,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
+     fldd    d1, [r3]                    @ d1<- vB
+@@ -6101,7 +6329,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
+     fldd    d1, [r3]                    @ d1<- vB
+@@ -6129,7 +6360,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
+     fldd    d1, [r3]                    @ d1<- vB
+@@ -6157,7 +6391,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r3, rINST, lsr #12          @ r3<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
+     CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
+     fldd    d1, [r3]                    @ d1<- vB
+@@ -6192,7 +6429,10 @@ constvalop_long_to_double:
+      */
+     /* binop/2addr vA, vB */
+     mov     r1, rINST, lsr #12          @ r1<- B
+-    ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    @ begin replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
++    lsl rINST, rINST, #20
++    lsr rINST, rINST, #28
++    @ end replacement of ubfx    rINST, rINST, #8, #4        @ rINST<- A
+     VREG_INDEX_TO_ADDR r1, r1           @ r1<- &fp[B]
+     VREG_INDEX_TO_ADDR r9, rINST        @ r9<- &fp[A]
+     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+@@ -6231,7 +6471,10 @@ constvalop_long_to_double:
+     /* binop/lit16 vA, vB, #+CCCC */
+     FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+     mov     r2, rINST, lsr #12          @ r2<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r2                     @ r0<- vB
+     .if 0
+     cmp     r1, #0                      @ is second operand zero?
+@@ -6267,7 +6510,10 @@ constvalop_long_to_double:
+     /* binop/lit16 vA, vB, #+CCCC */
+     FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+     mov     r2, rINST, lsr #12          @ r2<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r2                     @ r0<- vB
+     .if 0
+     cmp     r1, #0                      @ is second operand zero?
+@@ -6303,7 +6549,10 @@ constvalop_long_to_double:
+     /* binop/lit16 vA, vB, #+CCCC */
+     FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+     mov     r2, rINST, lsr #12          @ r2<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r2                     @ r0<- vB
+     .if 0
+     cmp     r1, #0                      @ is second operand zero?
+@@ -6334,7 +6583,10 @@ constvalop_long_to_double:
+      */
+     FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+     mov     r2, rINST, lsr #12          @ r2<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r2                     @ r0<- vB
+     cmp     r1, #0                      @ is second operand zero?
+     beq     common_errDivideByZero
+@@ -6368,7 +6620,10 @@ constvalop_long_to_double:
+      */
+     FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+     mov     r2, rINST, lsr #12          @ r2<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r2                     @ r0<- vB
+     cmp     r1, #0                      @ is second operand zero?
+     beq     common_errDivideByZero
+@@ -6405,7 +6660,10 @@ constvalop_long_to_double:
+     /* binop/lit16 vA, vB, #+CCCC */
+     FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+     mov     r2, rINST, lsr #12          @ r2<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r2                     @ r0<- vB
+     .if 0
+     cmp     r1, #0                      @ is second operand zero?
+@@ -6440,7 +6698,10 @@ constvalop_long_to_double:
+     /* binop/lit16 vA, vB, #+CCCC */
+     FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+     mov     r2, rINST, lsr #12          @ r2<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r2                     @ r0<- vB
+     .if 0
+     cmp     r1, #0                      @ is second operand zero?
+@@ -6475,7 +6736,10 @@ constvalop_long_to_double:
+     /* binop/lit16 vA, vB, #+CCCC */
+     FETCH_S r1, 1                       @ r1<- ssssCCCC (sign-extended)
+     mov     r2, rINST, lsr #12          @ r2<- B
+-    ubfx    r9, rINST, #8, #4           @ r9<- A
++    @ begin replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
++    lsl r9, rINST, #20
++    lsr r9, r9, #28
++    @ end replacement of ubfx    r9, rINST, #8, #4           @ r9<- A
+     GET_VREG r0, r2                     @ r0<- vB
+     .if 0
+     cmp     r1, #0                      @ is second operand zero?
+@@ -6833,7 +7097,10 @@ constvalop_long_to_double:
+     mov     r9, rINST, lsr #8           @ r9<- AA
+     and     r2, r3, #255                @ r2<- BB
+     GET_VREG r0, r2                     @ r0<- vBB
+-    ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
++    @ begin replacement of ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
++    lsl r1, r3, #19
++    lsr r1, r1, #27
++    @ end replacement of ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
+     .if 0
+     @cmp     r1, #0                     @ is second operand zero?
+     beq     common_errDivideByZero
+@@ -6874,7 +7141,10 @@ constvalop_long_to_double:
+     mov     r9, rINST, lsr #8           @ r9<- AA
+     and     r2, r3, #255                @ r2<- BB
+     GET_VREG r0, r2                     @ r0<- vBB
+-    ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
++    @ begin replacement of ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
++    lsl r1, r3, #19
++    lsr r1, r1, #27
++    @ end replacement of ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
+     .if 0
+     @cmp     r1, #0                     @ is second operand zero?
+     beq     common_errDivideByZero
+@@ -6915,7 +7185,10 @@ constvalop_long_to_double:
+     mov     r9, rINST, lsr #8           @ r9<- AA
+     and     r2, r3, #255                @ r2<- BB
+     GET_VREG r0, r2                     @ r0<- vBB
+-    ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
++    @ begin replacement of ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
++    lsl r1, r3, #19
++    lsr r1, r1, #27
++    @ end replacement of ubfx    r1, r3, #8, #5                            @ optional; typically r1<- ssssssCC (sign extended)
+     .if 0
+     @cmp     r1, #0                     @ is second operand zero?
+     beq     common_errDivideByZero
+@@ -6938,7 +7211,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- object we're operating on
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     ldr   r0, [r3, r1]                @ r0<- obj.field
+@@ -6955,7 +7231,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH ip, 1                         @ ip<- field byte offset
+     GET_VREG r3, r2                     @ r3<- object we're operating on
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
+@@ -6978,7 +7257,10 @@ constvalop_long_to_double:
+     GET_VREG r0, r2                     @ r0<- object we're operating on
+     bl      artIGetObjectFromMterp      @ (obj, offset)
+     ldr     r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     PREFETCH_INST 2
+     cmp     r3, #0
+     bne     MterpPossibleException      @ bail out
+@@ -6996,7 +7278,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     GET_VREG r0, r2                     @ r0<- fp[A]
+@@ -7013,7 +7298,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r3, 1                         @ r3<- field byte offset
+     GET_VREG r2, r2                     @ r2<- fp[B], the object pointer
+-    ubfx    r0, rINST, #8, #4           @ r0<- A
++    @ begin replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
++    lsl r0, rINST, #20
++    lsr r0, r0, #28
++    @ end replacement of ubfx    r0, rINST, #8, #4           @ r0<- A
+     cmp     r2, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     VREG_INDEX_TO_ADDR r0, r0           @ r0<- &fp[A]
+@@ -7104,7 +7392,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     GET_VREG r0, r2                     @ r0<- fp[A]
+@@ -7124,7 +7415,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     GET_VREG r0, r2                     @ r0<- fp[A]
+@@ -7144,7 +7438,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     GET_VREG r0, r2                     @ r0<- fp[A]
+@@ -7164,7 +7461,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- fp[B], the object pointer
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     GET_VREG r0, r2                     @ r0<- fp[A]
+@@ -7184,7 +7484,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- object we're operating on
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     ldrb   r0, [r3, r1]                @ r0<- obj.field
+@@ -7204,7 +7507,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- object we're operating on
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     ldrsb   r0, [r3, r1]                @ r0<- obj.field
+@@ -7224,7 +7530,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- object we're operating on
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     ldrh   r0, [r3, r1]                @ r0<- obj.field
+@@ -7244,7 +7553,10 @@ constvalop_long_to_double:
+     mov     r2, rINST, lsr #12          @ r2<- B
+     FETCH r1, 1                         @ r1<- field byte offset
+     GET_VREG r3, r2                     @ r3<- object we're operating on
+-    ubfx    r2, rINST, #8, #4           @ r2<- A
++    @ begin replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
++    lsl r2, rINST, #20
++    lsr r2, r2, #28
++    @ end replacement of ubfx    r2, rINST, #8, #4           @ r2<- A
+     cmp     r3, #0                      @ check object for null
+     beq     common_errNullObject        @ object was null
+     ldrsh   r0, [r3, r1]                @ r0<- obj.field
+@@ -7406,7 +7718,10 @@ artMterpAsmSisterStart:
+  * to modest integer.  The EABI convert function isn't doing this for us.
+  */
+ f2l_doconv:
+-    ubfx    r2, r0, #23, #8             @ grab the exponent
++    @ begin replacement of ubfx    r2, r0, #23, #8             @ grab the exponent
++    lsl r2, r0, #1
++    lsr r2, r2, #24
++    @ end replacement of ubfx    r2, r0, #23, #8             @ grab the exponent
+     cmp     r2, #0xbe                   @ MININT < x > MAXINT?
+     bhs     f2l_special_cases
+     b       __aeabi_f2lz                @ tail call to convert float to long
+@@ -7436,7 +7751,10 @@ f2l_maybeNaN:
+  * to modest integer.  The EABI convert function isn't doing this for us.
+  */
+ d2l_doconv:
+-    ubfx    r2, r1, #20, #11            @ grab the exponent
++    @ begin replacement of ubfx    r2, r1, #20, #11            @ grab the exponent
++    lsl r2, r1, #1
++    lsr r2, r2, #21
++    @ end replacement of ubfx    r2, r1, #20, #11            @ grab the exponent
+     movw    r3, #0x43e
+     cmp     r2, r3                      @ MINLONG < x > MAXLONG?
+     bhs     d2l_special_cases
+Index: android-platform-art-8.1.0+r23/runtime/interpreter/mterp/replace-ubfx.py
+===================================================================
+--- /dev/null
++++ android-platform-art-8.1.0+r23/runtime/interpreter/mterp/replace-ubfx.py
+@@ -0,0 +1,51 @@
++#!/usr/bin/python3
++#script to replace ubfx with equivalent code for older arm
++#note: these replacements will unfortunately clobber the carry
++#flag, I hope that doesn't break anything.
++#Copyright 2019 Peter Michael Green
++#
++#Permission is hereby granted, free of charge, to any person obtaining a copy of
++#this software and associated documentation files (the "Software"), to deal in 
++#the Software without restriction, including without limitation the rights to
++#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 
++#of the Software, and to permit persons to whom the Software is furnished to do 
++#so, subject to the following conditions:
++#
++#The above copyright notice and this permission notice shall be included in all 
++#copies or substantial portions of the Software.
++#
++#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
++#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
++#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
++#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++#SOFTWARE.
++import sys
++
++f = open(sys.argv[1],"r")
++for line in f:
++    line = line.rstrip()
++    linels = line.lstrip()
++    linesplit = linels.split()
++    if (len(linesplit) > 0) and ((linesplit[0] == 'ubfx') or (linesplit[0] == 'sbfx')):
++        linestartwhitespace = line[:(len(line)-len(linels))]
++        destreg = linesplit[1][:-1]
++        sourcereg = linesplit[2][:-1]
++        lsb = int(linesplit[3][1:-1])
++        width = int(linesplit[4][1:])
++        #print(linesplit)
++        #print((destreg,sourcereg,lsb,width))
++        print(linestartwhitespace+'@ begin replacement of '+linels)
++        print(linestartwhitespace+'lsl '+destreg+', '+sourcereg+', #'+str(32-width-lsb))
++        if linesplit[0] == 'ubfx':
++            rightshift = 'lsr'
++        else:
++            rightshift = 'asr'
++        print(linestartwhitespace+rightshift+' '+destreg+', '+destreg+', #'+str(32-width))
++        print(linestartwhitespace+'@ end replacement of '+linels)
++    else:
++        print(line)
++f.close()
++
++
diff -Nru android-platform-art-8.1.0+r23/debian/patches/replace-movw.patch android-platform-art-8.1.0+r23/debian/patches/replace-movw.patch
--- android-platform-art-8.1.0+r23/debian/patches/replace-movw.patch	1970-01-01 00:00:00.000000000 +0000
+++ android-platform-art-8.1.0+r23/debian/patches/replace-movw.patch	2019-03-19 01:46:21.000000000 +0000
@@ -0,0 +1,22 @@
+Description: Replace movw with mov and orr
+Author: Peter Michael Green <plugwash@raspbian.org>
+
+--- android-platform-art-8.1.0+r23.orig/runtime/interpreter/mterp/out/mterp_arm.S
++++ android-platform-art-8.1.0+r23/runtime/interpreter/mterp/out/mterp_arm.S
+@@ -7755,12 +7755,14 @@ d2l_doconv:
+     lsl r2, r1, #1
+     lsr r2, r2, #21
+     @ end replacement of ubfx    r2, r1, #20, #11            @ grab the exponent
+-    movw    r3, #0x43e
++    mov     r3, #0x03e
++    orr     r3, #0x400
+     cmp     r2, r3                      @ MINLONG < x > MAXLONG?
+     bhs     d2l_special_cases
+     b       __aeabi_d2lz                @ tail call to convert double to long
+ d2l_special_cases:
+-    movw    r3, #0x7ff
++    mov     r3, #0x0ff
++    orr     r3, #0x700
+     cmp     r2, r3
+     beq     d2l_maybeNaN                @ NaN?
+ d2l_notNaN:
diff -Nru android-platform-art-8.1.0+r23/debian/patches/series android-platform-art-8.1.0+r23/debian/patches/series
--- android-platform-art-8.1.0+r23/debian/patches/series	2018-12-05 19:31:34.000000000 +0000
+++ android-platform-art-8.1.0+r23/debian/patches/series	2019-03-19 01:46:21.000000000 +0000
@@ -5,3 +5,6 @@
 PAGE_SIZE.patch
 atomic-exception-specification.patch
 StrideIterator-constructor.patch
+hack-out-ubfx-and-sbfx.patch
+replace-movw.patch
+#dont-tag-asm-as-armv7.patch