BaseSynchronizationLib: Fix LoongArch64 synchronization functions

REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4432

There is a return value bug:
the sc.w/sc.d instruction overwrites $t0 with its status flag,
so use $t1 for the store-conditional to keep the original value
held in $t0 from being corrupted.
Fix the alignment check in InternalSyncCompareExchange16 to test
against sizeof (UINT16) instead of the size of the pointer.
Simplify AsmInternalSyncIncrement and AsmInternalSyncDecrement by
using a single amadd.w atomic add.

Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Liming Gao <gaoliming@byosoft.com.cn>
Cc: Zhiguang Liu <zhiguang.liu@intel.com>
Cc: Chao Li <lichao@loongson.cn>
Signed-off-by: Dongyan Qian <qiandongyan@loongson.cn>
Reviewed-by: Chao Li <lichao@loongson.cn>
Dongyan Qian 2023-04-27 20:57:12 +08:00 committed by mergify[bot]
parent 757f502a3b
commit b65c0eed6b
2 changed files with 13 additions and 19 deletions
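
The return-value bug described in the commit message matters because
InterlockedCompareExchange32/64 return the original value of the operand,
and callers loop on that returned value. A minimal illustrative caller,
assuming only the public MdePkg SynchronizationLib API (this function is
not part of the patch):

  #include <Library/SynchronizationLib.h>

  //
  // Illustrative only: a lock-free add built on InterlockedCompareExchange32.
  // The loop terminates only when the returned value equals the expected
  // original value, so returning the sc.w status flag instead of the loaded
  // value (the bug fixed below) breaks callers like this one.
  //
  UINT32
  AtomicAddExample (
    IN OUT volatile UINT32  *Counter,
    IN     UINT32           Addend
    )
  {
    UINT32  Original;
    UINT32  Previous;

    do {
      Original = *Counter;
      Previous = InterlockedCompareExchange32 (Counter, Original, Original + Addend);
    } while (Previous != Original);

    return Original + Addend;
  }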


@@ -53,9 +53,9 @@ ASM_PFX(AsmInternalSyncCompareExchange32):
 1:
   ll.w  $t0, $a0, 0x0
   bne   $t0, $a1, 2f
-  move  $t0, $a2
-  sc.w  $t0, $a0, 0x0
-  beqz  $t0, 1b
+  move  $t1, $a2
+  sc.w  $t1, $a0, 0x0
+  beqz  $t1, 1b
   b     3f
 2:
   dbar  0
@@ -76,9 +76,9 @@ ASM_PFX(AsmInternalSyncCompareExchange64):
 1:
   ll.d  $t0, $a0, 0x0
   bne   $t0, $a1, 2f
-  move  $t0, $a2
-  sc.d  $t0, $a0, 0x0
-  beqz  $t0, 1b
+  move  $t1, $a2
+  sc.d  $t1, $a0, 0x0
+  beqz  $t1, 1b
   b     3f
 2:
   dbar  0
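
A host-compiler sketch (GCC/Clang __atomic builtins, not EDK2 code) of the
contract these two routines implement. The key point of the fix is that the
value observed by ll.w/ll.d (kept in $t0 and eventually returned) and the
success flag produced by sc.w/sc.d (now kept in $t1) are two independent
results, so they cannot share one register:

  #include <stdbool.h>
  #include <stdint.h>

  uint32_t
  CompareExchange32Sketch (
    volatile uint32_t  *Value,
    uint32_t           CompareValue,
    uint32_t           ExchangeValue
    )
  {
    uint32_t  Observed = CompareValue;   // plays the role of $t0
    bool      Stored;                    // plays the role of $t1

    Stored = __atomic_compare_exchange_n (
               Value, &Observed, ExchangeValue,
               false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST
               );
    (void)Stored;

    //
    // Whether or not the store happened, the caller receives the value that
    // was observed in memory, never the success flag.
    //
    return Observed;
  }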
@@ -94,13 +94,10 @@ AsmInternalSyncIncrement (
   )
 **/
 ASM_PFX(AsmInternalSyncIncrement):
-  move     $t0, $a0
-  dbar     0
-  ld.w     $t1, $t0, 0x0
-  li.w     $t2, 1
-  amadd.w  $t1, $t2, $t0
+  li.w     $t0, 1
+  amadd.w  $zero, $t0, $a0

-  ld.w     $a0, $t0, 0x0
+  ld.w     $a0, $a0, 0
   jirl     $zero, $ra, 0

 /**
@@ -111,12 +108,9 @@ AsmInternalSyncDecrement (
   )
 **/
 ASM_PFX(AsmInternalSyncDecrement):
-  move     $t0, $a0
-  dbar     0
-  ld.w     $t1, $t0, 0x0
-  li.w     $t2, -1
-  amadd.w  $t1, $t2, $t0
+  li.w     $t0, -1
+  amadd.w  $zero, $t0, $a0

-  ld.w     $a0, $t0, 0x0
+  ld.w     $a0, $a0, 0
   jirl     $zero, $ra, 0
 .end
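
For reference, a minimal host-compiler sketch (assuming GCC/Clang __atomic
builtins; not EDK2 code) of what the new two-instruction sequence does:
li.w/amadd.w perform one atomic add whose old value is discarded into $zero,
and the following ld.w returns whatever the location holds afterwards. The
decrement case is identical with an addend of -1:

  #include <stdint.h>

  uint32_t
  SyncIncrementSketch (
    volatile uint32_t  *Value
    )
  {
    __atomic_fetch_add (Value, 1, __ATOMIC_SEQ_CST);   // amadd.w $zero, $t0, $a0
    return *Value;                                     // ld.w    $a0, $a0, 0
  }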


@@ -81,7 +81,7 @@ InternalSyncCompareExchange16 (
   volatile UINT32  *Ptr32;

   /* Check that ptr is naturally aligned */
-  ASSERT (!((UINT64)Value & (sizeof (Value) - 1)));
+  ASSERT (!((UINT64)Value & (sizeof (UINT16) - 1)));

   /* Mask inputs to the correct size. */
   Mask = (((~0UL) - (1UL << (0)) + 1) & (~0UL >> (64 - 1 - ((sizeof (UINT16) * 8) - 1))));
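
As a sanity check on the hunk above: the generic mask expression reduces to
0xFFFF for UINT16, and the corrected ASSERT tests the natural 2-byte alignment
of the pointer rather than the 8-byte alignment that sizeof of the pointer
variable itself implies on a 64-bit build. A small stand-alone check, assuming
an LP64 host where unsigned long is 64 bits (not EDK2 code):

  #include <assert.h>
  #include <inttypes.h>
  #include <stdio.h>

  int
  main (void)
  {
    //
    // Same expression as in the hunk above, with UINT16 spelled uint16_t:
    // ~0UL - 1 + 1 == ~0UL, and ~0UL >> (64 - 1 - 15) == ~0UL >> 48 == 0xFFFF.
    //
    uint64_t  Mask;

    Mask = (((~0UL) - (1UL << (0)) + 1) &
            (~0UL >> (64 - 1 - ((sizeof (uint16_t) * 8) - 1))));
    assert (Mask == 0xFFFF);

    //
    // Old check: sizeof of the pointer parameter minus 1 is 7, i.e. it
    // demanded 8-byte alignment for a 2-byte operand.
    // New check: sizeof (UINT16) - 1 == 1, i.e. 2-byte alignment.
    //
    printf ("Mask = 0x%" PRIx64 "\n", Mask);
    return 0;
  }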