author     Linus Torvalds <torvalds@linux-foundation.org>    2008-10-10 08:07:53 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-10-10 08:28:58 -0700
commit     d403a6484f0341bf0624d17ece46f24f741b6a92 (patch)
tree       be1c2ec69a3caa9f437e4b87ca9cac80e57fbc4d  /arch/x86/ia32/ia32_signal.c
parent     ed458df4d2470adc02762a87a9ad665d0b1a2bd4 (diff)
parent     e496e3d645c93206faf61ff6005995ebd08cc39c (diff)
Merge phase #1 of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
This merges phase 1 of the x86 tree, which is a collection of branches:

  x86/alternatives, x86/cleanups, x86/commandline, x86/crashdump,
  x86/debug, x86/defconfig, x86/doc, x86/exports, x86/fpu, x86/gart,
  x86/idle, x86/mm, x86/mtrr, x86/nmi-watchdog, x86/oprofile,
  x86/paravirt, x86/reboot, x86/sparse-fixes, x86/tsc, x86/urgent
  and x86/vmalloc

and as Ingo says: "these are the easiest, purely independent x86 topics
with no conflicts, in one nice Octopus merge".

* 'x86-v28-for-linus-phase1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (147 commits)
  x86: mtrr_cleanup: treat WRPROT as UNCACHEABLE
  x86: mtrr_cleanup: first 1M may be covered in var mtrrs
  x86: mtrr_cleanup: print out correct type v2
  x86: trivial printk fix in efi.c
  x86, debug: mtrr_cleanup print out var mtrr before change it
  x86: mtrr_cleanup try gran_size to less than 1M, v3
  x86: mtrr_cleanup try gran_size to less than 1M, cleanup
  x86: change MTRR_SANITIZER to def_bool y
  x86, debug printouts: IOMMU setup failures should not be KERN_ERR
  x86: export set_memory_ro and set_memory_rw
  x86: mtrr_cleanup try gran_size to less than 1M
  x86: mtrr_cleanup prepare to make gran_size to less 1M
  x86: mtrr_cleanup safe to get more spare regs now
  x86_64: be less annoying on boot, v2
  x86: mtrr_cleanup hole size should be less than half of chunk_size, v2
  x86: add mtrr_cleanup_debug command line
  x86: mtrr_cleanup optimization, v2
  x86: don't need to go to chunksize to 4G
  x86_64: be less annoying on boot
  x86, olpc: fix endian bug in openfirmware workaround
  ...
Diffstat (limited to 'arch/x86/ia32/ia32_signal.c')
-rw-r--r--  arch/x86/ia32/ia32_signal.c  21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 20af4c79579a..f1a2ac777faf 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -206,7 +206,7 @@ struct rt_sigframe
{ unsigned int cur; \
unsigned short pre; \
err |= __get_user(pre, &sc->seg); \
- asm volatile("movl %%" #seg ",%0" : "=r" (cur)); \
+ savesegment(seg, cur); \
pre |= mask; \
if (pre != cur) loadsegment(seg, pre); }
@@ -235,7 +235,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
*/
err |= __get_user(gs, &sc->gs);
gs |= 3;
- asm("movl %%gs,%0" : "=r" (oldgs));
+ savesegment(gs, oldgs);
if (gs != oldgs)
load_gs_index(gs);
@@ -355,14 +355,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
{
int tmp, err = 0;
- tmp = 0;
- __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
+ savesegment(gs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
- __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
+ savesegment(fs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
- __asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp));
+ savesegment(ds, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
- __asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp));
+ savesegment(es, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->es);
err |= __put_user((u32)regs->di, &sc->di);
@@ -498,8 +497,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
regs->dx = 0;
regs->cx = 0;
- asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
- asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+ loadsegment(ds, __USER32_DS);
+ loadsegment(es, __USER32_DS);
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;
@@ -591,8 +590,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->dx = (unsigned long) &frame->info;
regs->cx = (unsigned long) &frame->uc;
- asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
- asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+ loadsegment(ds, __USER32_DS);
+ loadsegment(es, __USER32_DS);
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;
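
The diff above replaces open-coded segment-register asm with the shared savesegment()/loadsegment() helpers, so details like the memory clobber and the invalid-selector fixup live in one place instead of being repeated at every call site. As a rough user-space sketch of what those helpers do — simplified from the 2.6.27-era x86 definitions, with loadsegment()'s exception-table fixup omitted, so not the kernel's exact macros:

    /* Sketch of the helpers used in the patch (simplified; the real
     * loadsegment() in arch/x86 also carries an exception-table fixup so
     * that a bad selector falls back to the null selector instead of
     * faulting). */
    #include <stdio.h>

    /* Read segment register 'seg' into 'value'. */
    #define savesegment(seg, value) \
            asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

    /* Load 'value' into segment register 'seg' (fixup omitted here). */
    #define loadsegment(seg, value) \
            asm volatile("movl %k0,%%" #seg : : "r" (value))

    int main(void)
    {
            unsigned int cur;

            /* Same pattern as the patched RELOAD_SEG macro and
             * ia32_setup_sigcontext(): reading a segment register is
             * unprivileged, so this runs fine from user space on x86. */
            savesegment(ds, cur);
            printf("current ds selector: %#x\n", cur);
            return 0;
    }

Compared with the removed one-off asm statements, every caller of the macros gets the same constraints, and any later fix to the segment handling only has to land in the shared header.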