x86/asm/entry/64: Simplify jumps in ret_from_fork
author Denys Vlasenko <dvlasenk@redhat.com>
Tue, 7 Apr 2015 20:43:42 +0000 (22:43 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 9 Apr 2015 08:31:25 +0000 (10:31 +0200)
Replace
        test
        jz  1f
        jmp label
    1:

with
        test
        jnz label

Run-tested.
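
For illustration only, a minimal standalone sketch of the same simplification,
using hypothetical symbol names rather than anything from the kernel (GNU as,
AT&T syntax, x86-64 SysV convention, so the argument arrives in %edi):

            .text
            .globl  is_user_frame
    is_user_frame:                          # int is_user_frame(unsigned cs)
            testl   $3, %edi                # low two bits of CS = CPL; nonzero => user
            jnz     .Luser                  # single conditional jump ...
            xorl    %eax, %eax              # ... replaces "jz 1f; jmp .Luser; 1:"
            ret
    .Luser:
            movl    $1, %eax
            ret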

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Drewry <wad@chromium.org>
Link: http://lkml.kernel.org/r/1428439424-7258-6-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/entry_64.S

index e8ddd51..a35e5e4 100644
@@ -608,18 +608,18 @@ ENTRY(ret_from_fork)
        RESTORE_EXTRA_REGS
 
        testl $3,CS(%rsp)                       # from kernel_thread?
-       jz   1f
 
        /*
         * By the time we get here, we have no idea whether our pt_regs,
         * ti flags, and ti status came from the 64-bit SYSCALL fast path,
         * the slow path, or one of the ia32entry paths.
-        * Use int_ret_from_sys_call to return, since it can safely handle
+        * Use IRET code path to return, since it can safely handle
         * all of the above.
         */
-       jmp  int_ret_from_sys_call
+       jnz     int_ret_from_sys_call
 
-1:
+       /* We came from kernel_thread */
+       /* nb: we depend on RESTORE_EXTRA_REGS above */
        movq %rbp, %rdi
        call *%rbx
        movl $0, RAX(%rsp)
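
For reference, the hunk as it reads with the patch applied (reconstructed from
the '+' and context lines of the diff above, nothing added):

        RESTORE_EXTRA_REGS

        testl $3,CS(%rsp)                       # from kernel_thread?

        /*
         * By the time we get here, we have no idea whether our pt_regs,
         * ti flags, and ti status came from the 64-bit SYSCALL fast path,
         * the slow path, or one of the ia32entry paths.
         * Use IRET code path to return, since it can safely handle
         * all of the above.
         */
        jnz     int_ret_from_sys_call

        /* We came from kernel_thread */
        /* nb: we depend on RESTORE_EXTRA_REGS above */
        movq %rbp, %rdi
        call *%rbx
        movl $0, RAX(%rsp)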