-rw-r--r--  arch/x86/include/asm/i387.h         44
-rw-r--r--  arch/x86/include/asm/processor.h     1
-rw-r--r--  arch/x86/include/asm/thread_info.h   2
-rw-r--r--  arch/x86/kernel/traps.c             11
-rw-r--r--  arch/x86/kernel/xsave.c              2
-rw-r--r--  arch/x86/kvm/vmx.c                   2

6 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 01b115d86770..f5376676f89c 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -264,21 +264,21 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
* be preemption protection *and* they need to be
* properly paired with the CR0.TS changes!
*/
-static inline int __thread_has_fpu(struct thread_info *ti)
+static inline int __thread_has_fpu(struct task_struct *tsk)
{
- return ti->status & TS_USEDFPU;
+ return tsk->thread.has_fpu;
}

/* Must be paired with an 'stts' after! */
-static inline void __thread_clear_has_fpu(struct thread_info *ti)
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
- ti->status &= ~TS_USEDFPU;
+ tsk->thread.has_fpu = 0;
}

/* Must be paired with a 'clts' before! */
-static inline void __thread_set_has_fpu(struct thread_info *ti)
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
- ti->status |= TS_USEDFPU;
+ tsk->thread.has_fpu = 1;
}

/*
@@ -288,16 +288,16 @@ static inline void __thread_set_has_fpu(struct thread_info *ti)
* These generally need preemption protection to work,
* do try to avoid using these on their own.
*/
-static inline void __thread_fpu_end(struct thread_info *ti)
+static inline void __thread_fpu_end(struct task_struct *tsk)
{
- __thread_clear_has_fpu(ti);
+ __thread_clear_has_fpu(tsk);
stts();
}

-static inline void __thread_fpu_begin(struct thread_info *ti)
+static inline void __thread_fpu_begin(struct task_struct *tsk)
{
clts();
- __thread_set_has_fpu(ti);
+ __thread_set_has_fpu(tsk);
}

/*
@@ -308,21 +308,21 @@ extern int restore_i387_xstate(void __user *buf);

static inline void __unlazy_fpu(struct task_struct *tsk)
{
- if (__thread_has_fpu(task_thread_info(tsk))) {
+ if (__thread_has_fpu(tsk)) {
__save_init_fpu(tsk);
- __thread_fpu_end(task_thread_info(tsk));
+ __thread_fpu_end(tsk);
} else
tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
- if (__thread_has_fpu(task_thread_info(tsk))) {
+ if (__thread_has_fpu(tsk)) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
- __thread_fpu_end(task_thread_info(tsk));
+ __thread_fpu_end(tsk);
}
}
@@ -337,7 +337,7 @@ static inline void __clear_fpu(struct task_struct *tsk)
*/
static inline bool interrupted_kernel_fpu_idle(void)
{
- return !__thread_has_fpu(current_thread_info()) &&
+ return !__thread_has_fpu(current) &&
(read_cr0() & X86_CR0_TS);
}
@@ -371,12 +371,12 @@ static inline bool irq_fpu_usable(void)

static inline void kernel_fpu_begin(void)
{
- struct thread_info *me = current_thread_info();
+ struct task_struct *me = current;

WARN_ON_ONCE(!irq_fpu_usable());
preempt_disable();
if (__thread_has_fpu(me)) {
- __save_init_fpu(me->task);
+ __save_init_fpu(me);
__thread_clear_has_fpu(me);
/* We do 'stts()' in kernel_fpu_end() */
} else
@@ -441,13 +441,13 @@ static inline void irq_ts_restore(int TS_state)
*/
static inline int user_has_fpu(void)
{
- return __thread_has_fpu(current_thread_info());
+ return __thread_has_fpu(current);
}

static inline void user_fpu_end(void)
{
preempt_disable();
- __thread_fpu_end(current_thread_info());
+ __thread_fpu_end(current);
preempt_enable();
}
@@ -455,7 +455,7 @@ static inline void user_fpu_begin(void)
{
preempt_disable();
if (!user_has_fpu())
- __thread_fpu_begin(current_thread_info());
+ __thread_fpu_begin(current);
preempt_enable();
}
@@ -464,10 +464,10 @@ static inline void user_fpu_begin(void)
*/
static inline void save_init_fpu(struct task_struct *tsk)
{
- WARN_ON_ONCE(!__thread_has_fpu(task_thread_info(tsk)));
+ WARN_ON_ONCE(!__thread_has_fpu(tsk));
preempt_disable();
__save_init_fpu(tsk);
- __thread_fpu_end(task_thread_info(tsk));
+ __thread_fpu_end(tsk);
preempt_enable();
}
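
A minimal usage sketch may help orient the i387.h hunks above: kernel code brackets any SSE/AVX use with the kernel_fpu_begin()/kernel_fpu_end() pair, which after this patch tracks ownership through current->thread.has_fpu rather than a thread_info status bit. The function below is hypothetical, for illustration only; it is not part of the patch.

/* Hypothetical caller sketch, not patch text. */
static void example_simd_work(void)
{
	if (!irq_fpu_usable())
		return;			/* e.g. fall back to a scalar path */

	kernel_fpu_begin();		/* preempt off; owner's FPU state saved */
	/* ... SSE/AVX instructions may be used here ... */
	kernel_fpu_end();		/* stts() re-arms the lazy #NM trap; preempt on */
}
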
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c26931..f7c89e231c6c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -454,6 +454,7 @@ struct thread_struct {
unsigned long trap_no;
unsigned long error_code;
/* floating point and extended processor state */
+ unsigned long has_fpu;
struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index bc817cd8b443..cfd8144d5527 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -247,8 +247,6 @@ static inline struct thread_info *current_thread_info(void)
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
-#define TS_USEDFPU 0x0001 /* FPU was used by this task
- this quantum (SMP) */
#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
#define TS_POLLING 0x0004 /* idle task polling need_resched,
skip sending interrupt */
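
The processor.h and thread_info.h hunks together are the point of the patch: the "this task owns the FPU" state moves from the TS_USEDFPU status bit on thread_info to a plain word in task_struct. A side-by-side comparison, using hypothetical wrapper names (fpu_owner_old()/fpu_owner_new() are illustrative, not patch text):

/* Pre-patch form: a status bit, reached through an extra pointer hop. */
static inline int fpu_owner_old(struct task_struct *tsk)
{
	return task_thread_info(tsk)->status & TS_USEDFPU;
}

/* Post-patch form: an ordinary word in task_struct.  No atomic bit ops
 * are needed, since only the task itself (or the context-switch path,
 * while the task is off the CPU) ever touches it. */
static inline int fpu_owner_new(struct task_struct *tsk)
{
	return tsk->thread.has_fpu;
}
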
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4d42300dcd2c..ad25e51f40c4 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -582,12 +582,11 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
*/
void math_state_restore(void)
{
- struct thread_info *thread = current_thread_info();
- struct task_struct *tsk = thread->task;
+ struct task_struct *tsk = current;

/* We need a safe address that is cheap to find and that is already
- in L1. We just brought in "thread->task", so use that */
-#define safe_address (thread->task)
+ in L1. We're just bringing in "tsk->thread.has_fpu", so use that */
+#define safe_address (tsk->thread.has_fpu)

if (!tsk_used_math(tsk)) {
local_irq_enable();
@@ -604,7 +603,7 @@ void math_state_restore(void)
local_irq_disable();
}

- __thread_fpu_begin(thread);
+ __thread_fpu_begin(tsk);

/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
@@ -620,7 +619,7 @@ void math_state_restore(void)
* Paranoid restore. send a SIGSEGV if we fail to restore the state.
*/
if (unlikely(restore_fpu_checking(tsk))) {
- __thread_fpu_end(thread);
+ __thread_fpu_end(tsk);
force_sig(SIGSEGV, tsk);
return;
}
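
The safe_address change above deserves a note: the macro must name a value that is cheap to reach and already hot in L1. The old code had just loaded thread->task; the new code has just written tsk->thread.has_fpu in __thread_fpu_begin(tsk), so that field takes over. The macro's consumer sits between the last two hunks and is elided by the diff context; reconstructed here from the surrounding source for orientation (these are not lines of this patch), it is the FXSAVE-leak workaround the comment describes:

	/* On CPUs with the FXSAVE leak erratum, clear the x87 stack tags
	 * and load a dummy value from safe_address so FIP/FDP/FOP hold
	 * something defined before the state is saved. */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
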
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a0bcd0dbc951..711091114119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
if (!fx)
return;

- BUG_ON(__thread_has_fpu(task_thread_info(tsk)));
+ BUG_ON(__thread_has_fpu(tsk));

xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 36091dd04b4b..3b4c8d8ad906 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
- if (__thread_has_fpu(current_thread_info()))
+ if (__thread_has_fpu(current))
clts();
load_gdt(&__get_cpu_var(host_gdt));
}
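
The vmx.c hunk is the same substitution at the KVM boundary: guest execution can leave CR0.TS set, so if the returning host task owns the FPU, TS must be cleared again before host code runs. A hypothetical assertion (not in the patch, built only from helpers visible in the hunks above) states the invariant this code maintains:

/* Hypothetical debug check, not patch text: CR0.TS must be clear
 * exactly while the current task owns the FPU; a stale TS here would
 * make the host's next FPU instruction take a spurious #NM. */
static inline void assert_host_fpu_invariant(void)
{
	WARN_ON_ONCE(__thread_has_fpu(current) && (read_cr0() & X86_CR0_TS));
}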