diff -Nru android-platform-art-10.0.0+r36/debian/changelog android-platform-art-10.0.0+r36/debian/changelog --- android-platform-art-10.0.0+r36/debian/changelog 2021-01-07 12:08:42.000000000 +0000 +++ android-platform-art-10.0.0+r36/debian/changelog 2021-01-23 10:18:40.000000000 +0000 @@ -1,3 +1,18 @@ +android-platform-art (10.0.0+r36-3+rpi1) bullseye-staging; urgency=medium + + [changes brought forward from 8.1.0+r23-3+rpi2 by Peter Michael Green at Tue, 19 Mar 2019 01:46:21 +0000] + * Replace ubfx and sbfx with shift instructions + * Replace movw with mov and orr + * Failed attempt to avoid tagging asm files as armv7 + (I'm just going to let this one through despite the apparent + armv7 content, i suspect that is a lesser evil than not having + the package at all). + + [changes introduced in 10.0.0+r36-3+rpi1 by Peter Michael Green] + * run runtime/interpreter/mterp/replace-ubfx.py on generated mterp.S file. + + -- Raspbian forward porter Sat, 23 Jan 2021 10:18:40 +0000 + android-platform-art (10.0.0+r36-3) unstable; urgency=medium * Team upload. diff -Nru android-platform-art-10.0.0+r36/debian/libart.mk android-platform-art-10.0.0+r36/debian/libart.mk --- android-platform-art-10.0.0+r36/debian/libart.mk 2021-01-06 20:50:00.000000000 +0000 +++ android-platform-art-10.0.0+r36/debian/libart.mk 2021-01-23 10:18:40.000000000 +0000 @@ -504,6 +504,7 @@ debian/out/mterp.S: runtime/interpreter/mterp/$(CPU)/*.S python3 runtime/interpreter/mterp/gen_mterp.py $@ $^ + python3 runtime/interpreter/mterp/replace-ubfx.py debian/out/mterp.S | sponge debian/out/mterp.S debian/out/asm_defines.h: debian/out/asm_defines.output python3 tools/cpp-define-generator/make_header.py $^ > $@ diff -Nru android-platform-art-10.0.0+r36/debian/patches/dont-tag-asm-as-armv7.patch android-platform-art-10.0.0+r36/debian/patches/dont-tag-asm-as-armv7.patch --- android-platform-art-10.0.0+r36/debian/patches/dont-tag-asm-as-armv7.patch 1970-01-01 00:00:00.000000000 +0000 +++ android-platform-art-10.0.0+r36/debian/patches/dont-tag-asm-as-armv7.patch 2021-01-23 10:18:40.000000000 +0000 @@ -0,0 +1,14 @@ +Description: Don't tag asm files as armv7 +Author: Peter Michael Green + +--- android-platform-art-8.1.0+r23.orig/runtime/arch/arm/asm_support_arm.S ++++ android-platform-art-8.1.0+r23/runtime/arch/arm/asm_support_arm.S +@@ -34,7 +34,7 @@ + #endif + + .syntax unified +-.arch armv7-a ++.arch armv6 + .thumb + + // Macro to generate the value of Runtime::Current into rDest. As it uses labels diff -Nru android-platform-art-10.0.0+r36/debian/patches/hack-out-ubfx-and-sbfx.patch android-platform-art-10.0.0+r36/debian/patches/hack-out-ubfx-and-sbfx.patch --- android-platform-art-10.0.0+r36/debian/patches/hack-out-ubfx-and-sbfx.patch 1970-01-01 00:00:00.000000000 +0000 +++ android-platform-art-10.0.0+r36/debian/patches/hack-out-ubfx-and-sbfx.patch 2021-01-23 10:18:40.000000000 +0000 @@ -0,0 +1,558 @@ +diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S +index b57e119..7531f9e 100644 +--- a/runtime/arch/arm/quick_entrypoints_arm.S ++++ b/runtime/arch/arm/quick_entrypoints_arm.S +@@ -714,7 +714,10 @@ ENTRY art_quick_lock_object + @ thread id did not match, go slow path. + add r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ Increment the recursive lock count. + @ Extract the new thin lock count for overflow check. 
+- ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE ++ @ begin replacement of ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE ++ lsl r2, r3, #(32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE)-(LOCK_WORD_THIN_LOCK_COUNT_SHIFT)) ++ lsr r2, r2, #((32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE))) ++ @ end replacement of ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE + cbz r2, .Lslow_lock @ Zero as the new count indicates overflow, go slow path. + strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits. + cbnz r2, .Llock_strex_fail @ If strex failed, retry. +@@ -1621,7 +1624,10 @@ ENTRY art_quick_imt_conflict_trampoline + bcs .Limt_conflict_trampoline_dex_cache_miss + ldr r4, [r0, #MIRROR_CLASS_DEX_CACHE_OFFSET] // Load the DexCache (without read barrier). + UNPOISON_HEAP_REF r4 +- ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index. ++ @ begin replacement of ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index. ++ lsl r1, r12, #(32-(METHOD_DEX_CACHE_HASH_BITS)-(0)) ++ lsr r1, r1, #((32-(METHOD_DEX_CACHE_HASH_BITS))) ++ @ end replacement of ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index. + ldr r4, [r4, #MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET] // Load the resolved methods. + add r4, r4, r1, lsl #(POINTER_SIZE_SHIFT + 1) // Load DexCache method slot address. + +diff --git a/runtime/interpreter/mterp/arm/arithmetic.S b/runtime/interpreter/mterp/arm/arithmetic.S +index a6ba454..0e98485 100644 +--- a/runtime/interpreter/mterp/arm/arithmetic.S ++++ b/runtime/interpreter/mterp/arm/arithmetic.S +@@ -51,7 +51,10 @@ + */ + /* binop/2addr vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r1, r3 @ r1<- vB + GET_VREG r0, r9 @ r0<- vA + .if $chkzero +@@ -83,7 +86,10 @@ + /* binop/lit16 vA, vB, #+CCCC */ + FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended) + mov r2, rINST, lsr #12 @ r2<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r0, r2 @ r0<- vB + .if $chkzero + cmp r1, #0 @ is second operand zero? 
+@@ -189,7 +195,10 @@ + */ + /* binop/2addr vA, vB */ + mov r1, rINST, lsr #12 @ r1<- B +- ubfx rINST, rINST, #8, #4 @ rINST<- A ++ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A ++ lsl rINST, rINST, #(32-(4)-(8)) ++ lsr rINST, rINST, #((32-(4))) ++ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] + GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1 +@@ -218,7 +227,10 @@ + */ + /* unop vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r0, r3 @ r0<- vB + $preinstr @ optional op; may set condition codes + FETCH_ADVANCE_INST 1 @ advance rPC, load rINST +@@ -241,7 +253,10 @@ + */ + /* unop vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vB/vB+1 + FETCH_ADVANCE_INST 1 @ advance rPC, load rINST +@@ -262,7 +277,10 @@ + */ + /* unop vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx rINST, rINST, #8, #4 @ rINST<- A ++ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A ++ lsl rINST, rINST, #(32-(4)-(8)) ++ lsr rINST, rINST, #((32-(4))) ++ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] + GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vAA +@@ -285,7 +303,10 @@ + */ + /* unop vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx rINST, rINST, #8, #4 @ rINST<- A ++ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A ++ lsl rINST, rINST, #(32-(4)-(8)) ++ lsr rINST, rINST, #((32-(4))) ++ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A + GET_VREG r0, r3 @ r0<- vB + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] + $preinstr @ optional op; may set condition codes +@@ -401,7 +422,10 @@ + * + */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r1, r3 @ r1<- vB + GET_VREG r0, r9 @ r0<- vA + cmp r1, #0 @ is second operand zero? +@@ -432,7 +456,10 @@ + */ + FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended) + mov r2, rINST, lsr #12 @ r2<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r0, r2 @ r0<- vB + cmp r1, #0 @ is second operand zero? 
+ beq common_errDivideByZero +@@ -566,7 +593,10 @@ + */ + /* mul-long/2addr vA, vB */ + mov r1, rINST, lsr #12 @ r1<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A] + GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1 +@@ -659,7 +689,10 @@ + * + */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r1, r3 @ r1<- vB + GET_VREG r0, r9 @ r0<- vA + cmp r1, #0 @ is second operand zero? +@@ -693,7 +726,10 @@ + */ + FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended) + mov r2, rINST, lsr #12 @ r2<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r0, r2 @ r0<- vB + cmp r1, #0 @ is second operand zero? + beq common_errDivideByZero +@@ -803,7 +839,10 @@ + */ + /* shl-long/2addr vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r2, r3 @ r2<- vB + CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] +@@ -865,7 +904,10 @@ + */ + /* shr-long/2addr vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r2, r3 @ r2<- vB + CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] +@@ -939,7 +981,10 @@ + */ + /* ushr-long/2addr vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + GET_VREG r2, r3 @ r2<- vB + CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] +diff --git a/runtime/interpreter/mterp/arm/array.S b/runtime/interpreter/mterp/arm/array.S +index 7b3db61..0c12dbf 100644 +--- a/runtime/interpreter/mterp/arm/array.S ++++ b/runtime/interpreter/mterp/arm/array.S +@@ -179,7 +179,10 @@ + * Return the length of an array. + */ + mov r1, rINST, lsr #12 @ r1<- B +- ubfx r2, rINST, #8, #4 @ r2<- A ++ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A ++ lsl r2, rINST, #(32-(4)-(8)) ++ lsr r2, r2, #((32-(4))) ++ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A + GET_VREG r0, r1 @ r0<- vB (object ref) + cmp r0, #0 @ is object null? 
+ beq common_errNullObject @ yup, fail +diff --git a/runtime/interpreter/mterp/arm/control_flow.S b/runtime/interpreter/mterp/arm/control_flow.S +index 2299ef9..89c3b93 100644 +--- a/runtime/interpreter/mterp/arm/control_flow.S ++++ b/runtime/interpreter/mterp/arm/control_flow.S +@@ -7,7 +7,10 @@ + */ + /* if-cmp vA, vB, +CCCC */ + mov r1, rINST, lsr #12 @ r1<- B +- ubfx r0, rINST, #8, #4 @ r0<- A ++ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A ++ lsl r0, rINST, #(32-(4)-(8)) ++ lsr r0, r0, #((32-(4))) ++ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A + GET_VREG r3, r1 @ r3<- vB + GET_VREG r0, r0 @ r0<- vA + FETCH_S rINST, 1 @ rINST<- branch offset, in code units +@@ -46,7 +49,10 @@ + * double to get a byte offset. + */ + /* goto +AA */ +- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended) ++ @ begin replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended) ++ lsl rINST, rINST, #(32-(8)-(8)) ++ asr rINST, rINST, #((32-(8))) ++ @ end replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended) + b MterpCommonTakenBranchNoFlags + + %def op_goto_16(): +diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S +index 035fc13..f2b7b3b 100644 +--- a/runtime/interpreter/mterp/arm/floating_point.S ++++ b/runtime/interpreter/mterp/arm/floating_point.S +@@ -32,7 +32,10 @@ + */ + /* binop/2addr vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA + GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vB +@@ -79,7 +82,10 @@ + */ + /* binop/2addr vA, vB */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB + CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs + GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vB +@@ -102,7 +108,10 @@ + mov r3, rINST, lsr #12 @ r3<- B + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB + GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + FETCH_ADVANCE_INST 1 @ advance rPC, load rINST + $instr @ s1<- op + GET_INST_OPCODE ip @ extract opcode from rINST +@@ -120,7 +129,10 @@ + mov r3, rINST, lsr #12 @ r3<- B + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB + GET_VREG_DOUBLE_BY_ADDR d0, r3 @ d0<- vB +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + FETCH_ADVANCE_INST 1 @ advance rPC, load rINST + $instr @ s0<- op + GET_INST_OPCODE ip @ extract opcode from rINST +@@ -138,7 +150,10 @@ + mov r3, rINST, lsr #12 @ r3<- B + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB + GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + FETCH_ADVANCE_INST 1 @ advance rPC, load rINST + $instr @ d0<- op 
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs +@@ -334,7 +349,10 @@ + * to modest integer. The EABI convert function isn't doing this for us. + */ + d2l_doconv: +- ubfx r2, r1, #20, #11 @ grab the exponent ++ @ begin replacement of ubfx r2, r1, #20, #11 @ grab the exponent ++ lsl r2, r1, #(32-(11)-(20)) ++ lsr r2, r2, #((32-(11))) ++ @ end replacement of ubfx r2, r1, #20, #11 @ grab the exponent + movw r3, #0x43e + cmp r2, r3 @ MINLONG < x > MAXLONG? + bhs d2l_special_cases +@@ -376,7 +394,10 @@ d2l_maybeNaN: + * to modest integer. The EABI convert function isn't doing this for us. + */ + f2l_doconv: +- ubfx r2, r0, #23, #8 @ grab the exponent ++ @ begin replacement of ubfx r2, r0, #23, #8 @ grab the exponent ++ lsl r2, r0, #(32-(8)-(23)) ++ lsr r2, r2, #((32-(8))) ++ @ end replacement of ubfx r2, r0, #23, #8 @ grab the exponent + cmp r2, #0xbe @ MININT < x > MAXINT? + bhs f2l_special_cases + b __aeabi_f2lz @ tail call to convert float to long +@@ -412,7 +433,10 @@ f2l_maybeNaN: + * For: long-to-double + */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] +diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S +index a044d91..404e9ce 100644 +--- a/runtime/interpreter/mterp/arm/object.S ++++ b/runtime/interpreter/mterp/arm/object.S +@@ -37,7 +37,10 @@ + %def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"): + @ Fast-path which gets the field offset from thread-local cache. 
+ add r0, rSELF, #THREAD_INTERPRETER_CACHE_OFFSET @ cache address +- ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index ++ @ begin replacement of ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index ++ lsl r1, rPC, #(32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2)-(2)) ++ lsr r1, r1, #((32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2))) ++ @ end replacement of ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index + add r0, r0, r1, lsl #3 @ entry address within the cache + ldrd r0, r1, [r0] @ entry key (pc) and value (offset) + mov r2, rINST, lsr #12 @ B +@@ -65,7 +68,10 @@ + # endif + #endif + % #endif +- ubfx r2, rINST, #8, #4 @ A ++ @ begin replacement of ubfx r2, rINST, #8, #4 @ A ++ lsl r2, rINST, #(32-(4)-(8)) ++ lsr r2, r2, #((32-(4))) ++ @ end replacement of ubfx r2, rINST, #8, #4 @ A + FETCH_ADVANCE_INST 2 @ advance rPC, load rINST + % if is_object: + SET_VREG_OBJECT r0, r2 @ fp[A]<- r0 +@@ -114,7 +120,10 @@ + GET_VREG r0, r2 @ r0<- object we're operating on + bl artIGetObjectFromMterp @ (obj, offset) + ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] +- ubfx r2, rINST, #8, #4 @ r2<- A ++ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A ++ lsl r2, rINST, #(32-(4)-(8)) ++ lsr r2, r2, #((32-(4))) ++ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A + PREFETCH_INST 2 + cmp r3, #0 + bne MterpPossibleException @ bail out +@@ -129,7 +138,10 @@ + mov r2, rINST, lsr #12 @ r2<- B + FETCH r1, 1 @ r1<- field byte offset + GET_VREG r3, r2 @ r3<- object we're operating on +- ubfx r2, rINST, #8, #4 @ r2<- A ++ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A ++ lsl r2, rINST, #(32-(4)-(8)) ++ lsr r2, r2, #((32-(4))) ++ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A + cmp r3, #0 @ check object for null + beq common_errNullObject @ object was null + $load r0, [r3, r1] @ r0<- obj.field +@@ -152,7 +164,10 @@ + mov r2, rINST, lsr #12 @ r2<- B + FETCH ip, 1 @ ip<- field byte offset + GET_VREG r3, r2 @ r3<- object we're operating on +- ubfx r2, rINST, #8, #4 @ r2<- A ++ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A ++ lsl r2, rINST, #(32-(4)-(8)) ++ lsr r2, r2, #((32-(4))) ++ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A + cmp r3, #0 @ check object for null + beq common_errNullObject @ object was null + ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned) +@@ -179,7 +194,10 @@ + mov r3, rSELF @ r3<- self + bl MterpInstanceOf @ (index, &obj, method, self) + ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET] +- ubfx r9, rINST, #8, #4 @ r9<- A ++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A ++ lsl r9, rINST, #(32-(4)-(8)) ++ lsr r9, r9, #((32-(4))) ++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A + PREFETCH_INST 2 + cmp r1, #0 @ exception pending? 
+ bne MterpException +@@ -230,7 +248,10 @@ + mov r2, rINST, lsr #12 @ r2<- B + FETCH r1, 1 @ r1<- field byte offset + GET_VREG r3, r2 @ r3<- fp[B], the object pointer +- ubfx r2, rINST, #8, #4 @ r2<- A ++ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A ++ lsl r2, rINST, #(32-(4)-(8)) ++ lsr r2, r2, #((32-(4))) ++ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A + cmp r3, #0 @ check object for null + beq common_errNullObject @ object was null + GET_VREG r0, r2 @ r0<- fp[A] +@@ -253,7 +274,10 @@ + mov r2, rINST, lsr #12 @ r2<- B + FETCH r3, 1 @ r3<- field byte offset + GET_VREG r2, r2 @ r2<- fp[B], the object pointer +- ubfx r0, rINST, #8, #4 @ r0<- A ++ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A ++ lsl r0, rINST, #(32-(4)-(8)) ++ lsr r0, r0, #((32-(4))) ++ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A + cmp r2, #0 @ check object for null + beq common_errNullObject @ object was null + VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A] +diff --git a/runtime/interpreter/mterp/arm/other.S b/runtime/interpreter/mterp/arm/other.S +index 31b9354..491219a 100644 +--- a/runtime/interpreter/mterp/arm/other.S ++++ b/runtime/interpreter/mterp/arm/other.S +@@ -45,8 +45,14 @@ + + %def op_const_4(): + /* const/4 vA, #+B */ +- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended) +- ubfx r0, rINST, #8, #4 @ r0<- A ++ @ begin replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended) ++ lsl r1, rINST, #(32-(4)-(12)) ++ asr r1, r1, #((32-(4))) ++ @ end replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended) ++ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A ++ lsl r0, rINST, #(32-(4)-(8)) ++ lsr r0, r0, #((32-(4))) ++ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A + FETCH_ADVANCE_INST 1 @ advance rPC, load rINST + GET_INST_OPCODE ip @ ip<- opcode from rINST + SET_VREG r1, r0 @ fp[A]<- r1 +@@ -192,7 +198,10 @@ + /* for move, move-object, long-to-int */ + /* op vA, vB */ + mov r1, rINST, lsr #12 @ r1<- B from 15:12 +- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8 ++ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8 ++ lsl r0, rINST, #(32-(4)-(8)) ++ lsr r0, r0, #((32-(4))) ++ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8 + FETCH_ADVANCE_INST 1 @ advance rPC, load rINST + GET_VREG r2, r1 @ r2<- fp[B] + GET_INST_OPCODE ip @ ip<- opcode from rINST +@@ -287,7 +296,10 @@ + /* move-wide vA, vB */ + /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ + mov r3, rINST, lsr #12 @ r3<- B +- ubfx rINST, rINST, #8, #4 @ rINST<- A ++ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A ++ lsl rINST, rINST, #(32-(4)-(8)) ++ lsr rINST, rINST, #((32-(4))) ++ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A] + GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[B] +diff --git a/runtime/interpreter/mterp/replace-ubfx.py b/runtime/interpreter/mterp/replace-ubfx.py +new file mode 100644 +index 0000000..8a79cc8 +--- /dev/null ++++ b/runtime/interpreter/mterp/replace-ubfx.py +@@ -0,0 +1,51 @@ ++#!/usr/bin/python3 ++#script to replace ubfx with equivilent code for older arm ++#note: these replacements will unfortunately clobber the carry ++#flag, I hope that doesn't break anything. 
++#Copyright 2019 Peter Michael Green ++# ++#Permission is hereby granted, free of charge, to any person obtaining a copy of ++#this software and associated documentation files (the "Software"), to deal in ++#the Software without restriction, including without limitation the rights to ++#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++#of the Software, and to permit persons to whom the Software is furnished to do ++#so, subject to the following conditions: ++# ++#The above copyright notice and this permission notice shall be included in all ++#copies or substantial portions of the Software. ++# ++#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++#SOFTWARE. ++import sys ++ ++f = open(sys.argv[1],"r") ++for line in f: ++ line = line.rstrip() ++ linels = line.lstrip() ++ linesplit = linels.split() ++ if (len(linesplit) > 0) and ((linesplit[0] == 'ubfx') or (linesplit[0] == 'sbfx')): ++ linestartwhitespace = line[:(len(line)-len(linels))] ++ destreg = linesplit[1][:-1] ++ sourcereg = linesplit[2][:-1] ++ lsb = linesplit[3][1:-1] ++ width = linesplit[4][1:] ++ #print(linesplit) ++ #print((destreg,sourcereg,lsb,width)) ++ print(linestartwhitespace+'@ begin replacement of '+linels) ++ print(linestartwhitespace+'lsl '+destreg+', '+sourcereg+', #(32-('+width+')-('+lsb+'))') ++ if linesplit[0] == 'ubfx': ++ rightshift = 'lsr' ++ else: ++ rightshift = 'asr' ++ print(linestartwhitespace+rightshift+' '+destreg+', '+destreg+', #((32-('+width+')))') ++ print(linestartwhitespace+'@ end replacement of '+linels) ++ else: ++ print(line) ++f.close() ++ ++ diff -Nru android-platform-art-10.0.0+r36/debian/patches/replace-movw.patch android-platform-art-10.0.0+r36/debian/patches/replace-movw.patch --- android-platform-art-10.0.0+r36/debian/patches/replace-movw.patch 1970-01-01 00:00:00.000000000 +0000 +++ android-platform-art-10.0.0+r36/debian/patches/replace-movw.patch 2021-01-23 10:18:40.000000000 +0000 @@ -0,0 +1,24 @@ +Description: Replace movw with mov and orr +Author: Peter Michael Green + +diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S +index f2b7b3b..2f78fa9 100644 +--- a/runtime/interpreter/mterp/arm/floating_point.S ++++ b/runtime/interpreter/mterp/arm/floating_point.S +@@ -353,12 +353,14 @@ d2l_doconv: + lsl r2, r1, #(32-(11)-(20)) + lsr r2, r2, #((32-(11))) + @ end replacement of ubfx r2, r1, #20, #11 @ grab the exponent +- movw r3, #0x43e ++ mov r3, #0x03e ++ orr r3, #0x400 + cmp r2, r3 @ MINLONG < x > MAXLONG? + bhs d2l_special_cases + b __aeabi_d2lz @ tail call to convert double to long + d2l_special_cases: +- movw r3, #0x7ff ++ mov r3, #0x0ff ++ orr r3, #0x700 + cmp r2, r3 + beq d2l_maybeNaN @ NaN? 
+ d2l_notNaN: diff -Nru android-platform-art-10.0.0+r36/debian/patches/series android-platform-art-10.0.0+r36/debian/patches/series --- android-platform-art-10.0.0+r36/debian/patches/series 2021-01-04 14:52:01.000000000 +0000 +++ android-platform-art-10.0.0+r36/debian/patches/series 2021-01-23 10:18:40.000000000 +0000 @@ -12,3 +12,6 @@ adapt-asm-output-of-clang-that-no-is-placed-before-data.patch arm-asm-instruction.patch fix-mterp-assembly-to-use-uxtw-instead-of-lsl-where-needed.patch +hack-out-ubfx-and-sbfx.patch +replace-movw.patch +#dont-tag-asm-as-armv7.patch
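For reference only (not part of the debdiff above): a minimal Python sketch checking the identities that hack-out-ubfx-and-sbfx.patch, replace-ubfx.py and replace-movw.patch rely on — `ubfx rd, rn, #lsb, #width` becomes a left shift by (32-width-lsb) followed by a logical right shift by (32-width), `sbfx` uses an arithmetic right shift instead, and the movw constants are rebuilt from two ARMv6-encodable immediates. The helper names below are illustrative, not from the source.

#!/usr/bin/python3
# Sketch (not part of the patch): verify the shift-based bitfield replacements.
# ubfx rd, rn, #lsb, #width  ->  lsl rd, rn, #(32-width-lsb) ; lsr rd, rd, #(32-width)
# sbfx rd, rn, #lsb, #width  ->  lsl rd, rn, #(32-width-lsb) ; asr rd, rd, #(32-width)

MASK32 = 0xFFFFFFFF

def ubfx_via_shifts(rn, lsb, width):
    # Unsigned bitfield extract via lsl + lsr on a 32-bit register.
    r = (rn << (32 - width - lsb)) & MASK32   # lsl, truncated to 32 bits
    return r >> (32 - width)                  # lsr (logical right shift)

def sbfx_via_shifts(rn, lsb, width):
    # Signed bitfield extract via lsl + asr on a 32-bit register.
    r = (rn << (32 - width - lsb)) & MASK32   # lsl, truncated to 32 bits
    r >>= (32 - width)                        # shift the field down...
    if r & (1 << (width - 1)):                # ...then sign-extend, as asr would
        r -= 1 << width
    return r

# Example: extracting field A (bits 11:8) of a Dalvik instruction word,
# as the mterp hunks do with "ubfx r9, rINST, #8, #4".
rINST = 0x12345A71
assert ubfx_via_shifts(rINST, 8, 4) == (rINST >> 8) & 0xF
# "sbfx rINST, rINST, #8, #8" (op_goto): 0xF3 sign-extends to -13.
assert sbfx_via_shifts(0x0000F300, 8, 8) == -13

# replace-movw.patch builds the former movw constants from two immediates:
assert 0x03E | 0x400 == 0x43E and 0x0FF | 0x700 == 0x7FF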