summaryrefslogtreecommitdiff
path: root/arch/sparc/kernel
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2016-08-15 14:47:54 -0700
committerDavid S. Miller <davem@davemloft.net>2016-10-24 11:31:58 -0700
commit83a17d2661674d8c198adc0e183418f72aabab79 (patch)
treedfda84fcce63b99268c872b9264e302e4dc8f13c /arch/sparc/kernel
parentaa95ce361ed95c72ac42dcb315166bce5cf1a014 (diff)
downloadlinux-next-83a17d2661674d8c198adc0e183418f72aabab79.tar.gz
sparc64: Prepare to move to more saner user copy exception handling.
The fixup helper function mechanism for handling user copy fault handling is not 100% accurate, and can never be made so. We are going to transition the code to return the running return length, which is always kept track in one or more registers of each of these routines. In order to convert them one by one, we have to allow the existing behavior to continue functioning. Therefore make all the copy code that wants the fixup helper to be used return negative one. After all of the user copy routines have been converted, this logic and the fixup helpers themselves can be removed completely. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--arch/sparc/kernel/head_64.S23
1 file changed, 11 insertions, 12 deletions
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 603d73654295..5f17de6b5fc3 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -926,41 +926,40 @@ tlb_type: .word 0 /* Must NOT end up in BSS */
EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
- .globl __retl_efault, __ret_one, __retl_one
ENTRY(__retl_efault)
retl
mov -EFAULT, %o0
ENDPROC(__retl_efault)
-ENTRY(__retl_one)
+ENTRY(__retl_mone)
retl
- mov 1, %o0
-ENDPROC(__retl_one)
+ mov -1, %o0
+ENDPROC(__retl_mone)
-ENTRY(__retl_one_fp)
+ENTRY(__retl_mone_fp)
VISExitHalf
retl
mov 1, %o0
-ENDPROC(__retl_one_fp)
+ENDPROC(__retl_mone_fp)
-ENTRY(__ret_one_asi)
+ENTRY(__ret_mone_asi)
wr %g0, ASI_AIUS, %asi
ret
restore %g0, 1, %o0
-ENDPROC(__ret_one_asi)
+ENDPROC(__ret_mone_asi)
-ENTRY(__retl_one_asi)
+ENTRY(__retl_mone_asi)
wr %g0, ASI_AIUS, %asi
retl
mov 1, %o0
-ENDPROC(__retl_one_asi)
+ENDPROC(__retl_mone_asi)
-ENTRY(__retl_one_asi_fp)
+ENTRY(__retl_mone_asi_fp)
wr %g0, ASI_AIUS, %asi
VISExitHalf
retl
mov 1, %o0
-ENDPROC(__retl_one_asi_fp)
+ENDPROC(__retl_mone_asi_fp)
ENTRY(__retl_o1)
retl