Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  47
1 file changed, 36 insertions(+), 11 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5c2c885ee52b..b867a4dab38a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1903,8 +1903,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
@@ -1957,17 +1955,31 @@ static inline void memalloc_noio_restore(unsigned int flags)
}
/* Per-process atomic flags. */
-#define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */
+#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
+#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-static inline bool task_no_new_privs(struct task_struct *p)
-{
- return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
-}
-static inline void task_set_no_new_privs(struct task_struct *p)
-{
- set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
-}
+#define TASK_PFA_TEST(name, func) \
+ static inline bool task_##func(struct task_struct *p) \
+ { return test_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_SET(name, func) \
+ static inline void task_set_##func(struct task_struct *p) \
+ { set_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_CLEAR(name, func) \
+ static inline void task_clear_##func(struct task_struct *p) \
+ { clear_bit(PFA_##name, &p->atomic_flags); }
+
+TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
+TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
+
+TASK_PFA_TEST(SPREAD_PAGE, spread_page)
+TASK_PFA_SET(SPREAD_PAGE, spread_page)
+TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
+
+TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
/*
* task->jobctl flags
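For context between the hunks: the TASK_PFA_* macros above stamp out one test/set/clear accessor per flag, all backed by bit operations on task->atomic_flags. Below is a minimal user-space sketch of that macro pattern, assuming a stand-in struct task and plain, non-atomic demo_test_bit()/demo_set_bit()/demo_clear_bit() helpers in place of task_struct and the kernel's atomic bitops; it is an illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for task_struct: one word of per-process flags. */
struct task {
	unsigned long atomic_flags;
};

/* Stand-ins for the kernel's test_bit()/set_bit()/clear_bit()
 * (non-atomic here, purely for demonstration). */
static inline bool demo_test_bit(int nr, unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}
static inline void demo_set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}
static inline void demo_clear_bit(int nr, unsigned long *addr)
{
	*addr &= ~(1UL << nr);
}

/* Same numbering scheme as the patch: a bit index, not a mask. */
#define PFA_NO_NEW_PRIVS 0
#define PFA_SPREAD_PAGE  1
#define PFA_SPREAD_SLAB  2

/* The macro pattern from the patch: each invocation expands to a
 * small static inline accessor named after the flag. */
#define TASK_PFA_TEST(name, func)				\
	static inline bool task_##func(struct task *p)		\
	{ return demo_test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)				\
	static inline void task_set_##func(struct task *p)	\
	{ demo_set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)				\
	static inline void task_clear_##func(struct task *p)	\
	{ demo_clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

int main(void)
{
	struct task t = { .atomic_flags = 0 };

	task_set_spread_page(&t);
	printf("spread_page set:     %d\n", task_spread_page(&t));   /* 1 */
	task_clear_spread_page(&t);
	printf("spread_page cleared: %d\n", task_spread_page(&t));   /* 0 */
	return 0;
}

The design change is visible in the expansion: the spread flags are now small bit numbers (0, 1, 2) kept in a dedicated task->atomic_flags word and updated with per-bit operations, rather than single-bit masks ORed into task->flags, which is why the first hunk drops PF_SPREAD_PAGE and PF_SPREAD_SLAB from the PF_ block.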
@@ -2608,9 +2620,22 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
task_thread_info(p)->task = p;
}
+/*
+ * Return the address of the last usable long on the stack.
+ *
+ * When the stack grows down, this is just above the thread
+ * info struct. Going any lower will corrupt the threadinfo.
+ *
+ * When the stack grows up, this is the highest address.
+ * Beyond that position, we corrupt data on the next page.
+ */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
+#else
return (unsigned long *)(task_thread_info(p) + 1);
+#endif
}
#endif
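To illustrate the arithmetic in end_of_stack() above, the sketch below lays out a mock THREAD_SIZE region in user space and computes the last usable long for both growth directions. THREAD_SIZE, struct mock_thread_info, and the helper names are illustrative assumptions, not the kernel's definitions; the two return expressions mirror the two branches of the patched function.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative value only; the real THREAD_SIZE is arch-specific. */
#define THREAD_SIZE 8192

struct mock_thread_info {
	int cpu;
	int flags;
};

/* Stack grows down: thread_info sits at the low end of the region, and the
 * last usable long is the one immediately above it. */
static unsigned long *end_of_stack_growsdown(struct mock_thread_info *ti)
{
	return (unsigned long *)(ti + 1);
}

/* Stack grows up (CONFIG_STACK_GROWSUP): the last usable long is the very
 * last long inside the THREAD_SIZE region. */
static unsigned long *end_of_stack_growsup(struct mock_thread_info *ti)
{
	return (unsigned long *)((unsigned long)ti + THREAD_SIZE) - 1;
}

int main(void)
{
	/* One THREAD_SIZE region standing in for a kernel stack plus thread_info. */
	void *region = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
	struct mock_thread_info *ti = region;

	if (!region)
		return 1;
	memset(region, 0, THREAD_SIZE);

	printf("region            : [%p, %p)\n",
	       region, (void *)((char *)region + THREAD_SIZE));
	printf("end (grows down)  : %p  (just past thread_info)\n",
	       (void *)end_of_stack_growsdown(ti));
	printf("end (grows up)    : %p  (last long in the region)\n",
	       (void *)end_of_stack_growsup(ti));

	free(region);
	return 0;
}

Either way, the returned address marks the boundary described in the comment: with CONFIG_STACK_GROWSUP it is the top of the region, otherwise it sits just above thread_info, and writing past it corrupts adjacent data.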