Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c               |   8
-rw-r--r--  kernel/cgroup.c             |   1
-rw-r--r--  kernel/cred.c               | 293
-rw-r--r--  kernel/exit.c               |   4
-rw-r--r--  kernel/fork.c               |   6
-rw-r--r--  kernel/futex.c              |  47
-rw-r--r--  kernel/kmod.c               |   5
-rw-r--r--  kernel/lockdep.c            | 792
-rw-r--r--  kernel/lockdep_internals.h  |   2
-rw-r--r--  kernel/lockdep_proc.c       | 128
-rw-r--r--  kernel/perf_counter.c       |   3
-rw-r--r--  kernel/printk.c             | 175
-rw-r--r--  kernel/ptrace.c             |   2
-rw-r--r--  kernel/rcutree.c            |   7
-rw-r--r--  kernel/sched.c              |   2
-rw-r--r--  kernel/spinlock.c           | 230
-rw-r--r--  kernel/sysctl.c             |   1
-rw-r--r--  kernel/workqueue.c          |   7
18 files changed, 1117 insertions(+), 596 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index 9f3391090b3e..9a4715a2f6bf 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -491,13 +491,17 @@ static void do_acct_process(struct bsd_acct_struct *acct,
u64 run_time;
struct timespec uptime;
struct tty_struct *tty;
+ const struct cred *orig_cred;
+
+ /* Perform file operations on behalf of whoever enabled accounting */
+ orig_cred = override_creds(file->f_cred);
/*
* First check to see if there is enough free_space to continue
* the process accounting system.
*/
if (!check_free_space(acct, file))
- return;
+ goto out;
/*
* Fill the accounting struct with the needed info as recorded
@@ -578,6 +582,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
sizeof(acct_t), &file->f_pos);
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
set_fs(fs);
+out:
+ revert_creds(orig_cred);
}
/**
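The override_creds()/revert_creds() pairing added above is the standard way to temporarily assume another set of credentials. A minimal sketch of the pattern, not part of this patch and using a hypothetical helper for the actual work, looks like:

    static int do_io_as(const struct cred *file_cred)
    {
        const struct cred *saved;
        int ret;

        /* temporarily run with the credentials of whoever opened the file */
        saved = override_creds(file_cred);

        ret = some_file_operation();    /* hypothetical: the actual I/O */

        /* restore the caller's own credentials on every exit path */
        revert_creds(saved);
        return ret;
    }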
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b6eadfe30e7b..c7ece8f027f2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -600,6 +600,7 @@ static struct inode_operations cgroup_dir_inode_operations;
static struct file_operations proc_cgroupstats_operations;
static struct backing_dev_info cgroup_backing_dev_info = {
+ .name = "cgroup",
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/kernel/cred.c b/kernel/cred.c
index 1bb4d7e5d616..006fcab009d5 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -18,6 +18,18 @@
#include <linux/cn_proc.h>
#include "cred-internals.h"
+#if 0
+#define kdebug(FMT, ...) \
+ printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
+#else
+static inline __attribute__((format(printf, 1, 2)))
+void no_printk(const char *fmt, ...)
+{
+}
+#define kdebug(FMT, ...) \
+ no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
+#endif
+
static struct kmem_cache *cred_jar;
/*
@@ -36,6 +48,10 @@ static struct thread_group_cred init_tgcred = {
*/
struct cred init_cred = {
.usage = ATOMIC_INIT(4),
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ .subscribers = ATOMIC_INIT(2),
+ .magic = CRED_MAGIC,
+#endif
.securebits = SECUREBITS_DEFAULT,
.cap_inheritable = CAP_INIT_INH_SET,
.cap_permitted = CAP_FULL_SET,
@@ -48,6 +64,31 @@ struct cred init_cred = {
#endif
};
+static inline void set_cred_subscribers(struct cred *cred, int n)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ atomic_set(&cred->subscribers, n);
+#endif
+}
+
+static inline int read_cred_subscribers(const struct cred *cred)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ return atomic_read(&cred->subscribers);
+#else
+ return 0;
+#endif
+}
+
+static inline void alter_cred_subscribers(const struct cred *_cred, int n)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ struct cred *cred = (struct cred *) _cred;
+
+ atomic_add(n, &cred->subscribers);
+#endif
+}
+
/*
* Dispose of the shared task group credentials
*/
@@ -85,9 +126,22 @@ static void put_cred_rcu(struct rcu_head *rcu)
{
struct cred *cred = container_of(rcu, struct cred, rcu);
+ kdebug("put_cred_rcu(%p)", cred);
+
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ if (cred->magic != CRED_MAGIC_DEAD ||
+ atomic_read(&cred->usage) != 0 ||
+ read_cred_subscribers(cred) != 0)
+ panic("CRED: put_cred_rcu() sees %p with"
+ " mag %x, put %p, usage %d, subscr %d\n",
+ cred, cred->magic, cred->put_addr,
+ atomic_read(&cred->usage),
+ read_cred_subscribers(cred));
+#else
if (atomic_read(&cred->usage) != 0)
panic("CRED: put_cred_rcu() sees %p with usage %d\n",
cred, atomic_read(&cred->usage));
+#endif
security_cred_free(cred);
key_put(cred->thread_keyring);
@@ -106,12 +160,90 @@ static void put_cred_rcu(struct rcu_head *rcu)
*/
void __put_cred(struct cred *cred)
{
+ kdebug("__put_cred(%p{%d,%d})", cred,
+ atomic_read(&cred->usage),
+ read_cred_subscribers(cred));
+
BUG_ON(atomic_read(&cred->usage) != 0);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ BUG_ON(read_cred_subscribers(cred) != 0);
+ cred->magic = CRED_MAGIC_DEAD;
+ cred->put_addr = __builtin_return_address(0);
+#endif
+ BUG_ON(cred == current->cred);
+ BUG_ON(cred == current->real_cred);
call_rcu(&cred->rcu, put_cred_rcu);
}
EXPORT_SYMBOL(__put_cred);
+/*
+ * Clean up a task's credentials when it exits
+ */
+void exit_creds(struct task_struct *tsk)
+{
+ struct cred *cred;
+
+ kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
+ atomic_read(&tsk->cred->usage),
+ read_cred_subscribers(tsk->cred));
+
+ cred = (struct cred *) tsk->real_cred;
+ tsk->real_cred = NULL;
+ validate_creds(cred);
+ alter_cred_subscribers(cred, -1);
+ put_cred(cred);
+
+ cred = (struct cred *) tsk->cred;
+ tsk->cred = NULL;
+ validate_creds(cred);
+ alter_cred_subscribers(cred, -1);
+ put_cred(cred);
+
+ cred = (struct cred *) tsk->replacement_session_keyring;
+ if (cred) {
+ tsk->replacement_session_keyring = NULL;
+ validate_creds(cred);
+ put_cred(cred);
+ }
+}
+
+/*
+ * Allocate blank credentials, such that the credentials can be filled in at a
+ * later date without risk of ENOMEM.
+ */
+struct cred *cred_alloc_blank(void)
+{
+ struct cred *new;
+
+ new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+#ifdef CONFIG_KEYS
+ new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
+ if (!new->tgcred) {
+ kfree(new);
+ return NULL;
+ }
+ atomic_set(&new->tgcred->usage, 1);
+#endif
+
+ atomic_set(&new->usage, 1);
+
+ if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+ goto error;
+
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ new->magic = CRED_MAGIC;
+#endif
+ return new;
+
+error:
+ abort_creds(new);
+ return NULL;
+}
+
/**
* prepare_creds - Prepare a new set of credentials for modification
*
@@ -132,16 +264,19 @@ struct cred *prepare_creds(void)
const struct cred *old;
struct cred *new;
- BUG_ON(atomic_read(&task->real_cred->usage) < 1);
+ validate_process_creds();
new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
if (!new)
return NULL;
+ kdebug("prepare_creds() alloc %p", new);
+
old = task->cred;
memcpy(new, old, sizeof(struct cred));
atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
get_group_info(new->group_info);
get_uid(new->user);
@@ -157,6 +292,7 @@ struct cred *prepare_creds(void)
if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
goto error;
+ validate_creds(new);
return new;
error:
@@ -229,9 +365,12 @@ struct cred *prepare_usermodehelper_creds(void)
if (!new)
return NULL;
+ kdebug("prepare_usermodehelper_creds() alloc %p", new);
+
memcpy(new, &init_cred, sizeof(struct cred));
atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
get_group_info(new->group_info);
get_uid(new->user);
@@ -250,6 +389,7 @@ struct cred *prepare_usermodehelper_creds(void)
#endif
if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0)
goto error;
+ validate_creds(new);
BUG_ON(atomic_read(&new->usage) != 1);
return new;
@@ -286,6 +426,10 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
) {
p->real_cred = get_cred(p->cred);
get_cred(p->cred);
+ alter_cred_subscribers(p->cred, 2);
+ kdebug("share_creds(%p{%d,%d})",
+ p->cred, atomic_read(&p->cred->usage),
+ read_cred_subscribers(p->cred));
atomic_inc(&p->cred->user->processes);
return 0;
}
@@ -331,6 +475,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
atomic_inc(&new->user->processes);
p->cred = p->real_cred = get_cred(new);
+ alter_cred_subscribers(new, 2);
+ validate_creds(new);
return 0;
error_put:
@@ -355,13 +501,20 @@ error_put:
int commit_creds(struct cred *new)
{
struct task_struct *task = current;
- const struct cred *old;
+ const struct cred *old = task->real_cred;
- BUG_ON(task->cred != task->real_cred);
- BUG_ON(atomic_read(&task->real_cred->usage) < 2);
+ kdebug("commit_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+
+ BUG_ON(task->cred != old);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ BUG_ON(read_cred_subscribers(old) < 2);
+ validate_creds(old);
+ validate_creds(new);
+#endif
BUG_ON(atomic_read(&new->usage) < 1);
- old = task->real_cred;
security_commit_creds(new, old);
get_cred(new); /* we will require a ref for the subj creds too */
@@ -390,12 +543,14 @@ int commit_creds(struct cred *new)
* cheaply with the new uid cache, so if it matters
* we should be checking for it. -DaveM
*/
+ alter_cred_subscribers(new, 2);
if (new->user != old->user)
atomic_inc(&new->user->processes);
rcu_assign_pointer(task->real_cred, new);
rcu_assign_pointer(task->cred, new);
if (new->user != old->user)
atomic_dec(&old->user->processes);
+ alter_cred_subscribers(old, -2);
sched_switch_user(task);
@@ -428,6 +583,13 @@ EXPORT_SYMBOL(commit_creds);
*/
void abort_creds(struct cred *new)
{
+ kdebug("abort_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ BUG_ON(read_cred_subscribers(new) != 0);
+#endif
BUG_ON(atomic_read(&new->usage) < 1);
put_cred(new);
}
@@ -444,7 +606,20 @@ const struct cred *override_creds(const struct cred *new)
{
const struct cred *old = current->cred;
- rcu_assign_pointer(current->cred, get_cred(new));
+ kdebug("override_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+
+ validate_creds(old);
+ validate_creds(new);
+ get_cred(new);
+ alter_cred_subscribers(new, 1);
+ rcu_assign_pointer(current->cred, new);
+ alter_cred_subscribers(old, -1);
+
+ kdebug("override_creds() = %p{%d,%d}", old,
+ atomic_read(&old->usage),
+ read_cred_subscribers(old));
return old;
}
EXPORT_SYMBOL(override_creds);
@@ -460,7 +635,15 @@ void revert_creds(const struct cred *old)
{
const struct cred *override = current->cred;
+ kdebug("revert_creds(%p{%d,%d})", old,
+ atomic_read(&old->usage),
+ read_cred_subscribers(old));
+
+ validate_creds(old);
+ validate_creds(override);
+ alter_cred_subscribers(old, 1);
rcu_assign_pointer(current->cred, old);
+ alter_cred_subscribers(override, -1);
put_cred(override);
}
EXPORT_SYMBOL(revert_creds);
@@ -502,11 +685,15 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
if (!new)
return NULL;
+ kdebug("prepare_kernel_cred() alloc %p", new);
+
if (daemon)
old = get_task_cred(daemon);
else
old = get_cred(&init_cred);
+ validate_creds(old);
+
*new = *old;
get_uid(new->user);
get_group_info(new->group_info);
@@ -526,7 +713,9 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
goto error;
atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
put_cred(old);
+ validate_creds(new);
return new;
error:
@@ -589,3 +778,95 @@ int set_create_files_as(struct cred *new, struct inode *inode)
return security_kernel_create_files_as(new, inode);
}
EXPORT_SYMBOL(set_create_files_as);
+
+#ifdef CONFIG_DEBUG_CREDENTIALS
+
+/*
+ * dump invalid credentials
+ */
+static void dump_invalid_creds(const struct cred *cred, const char *label,
+ const struct task_struct *tsk)
+{
+ printk(KERN_ERR "CRED: %s credentials: %p %s%s%s\n",
+ label, cred,
+ cred == &init_cred ? "[init]" : "",
+ cred == tsk->real_cred ? "[real]" : "",
+ cred == tsk->cred ? "[eff]" : "");
+ printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n",
+ cred->magic, cred->put_addr);
+ printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n",
+ atomic_read(&cred->usage),
+ read_cred_subscribers(cred));
+ printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n",
+ cred->uid, cred->euid, cred->suid, cred->fsuid);
+ printk(KERN_ERR "CRED: ->*gid = { %d,%d,%d,%d }\n",
+ cred->gid, cred->egid, cred->sgid, cred->fsgid);
+#ifdef CONFIG_SECURITY
+ printk(KERN_ERR "CRED: ->security is %p\n", cred->security);
+ if ((unsigned long) cred->security >= PAGE_SIZE &&
+ (((unsigned long) cred->security & 0xffffff00) !=
+ (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)))
+ printk(KERN_ERR "CRED: ->security {%x, %x}\n",
+ ((u32*)cred->security)[0],
+ ((u32*)cred->security)[1]);
+#endif
+}
+
+/*
+ * report use of invalid credentials
+ */
+void __invalid_creds(const struct cred *cred, const char *file, unsigned line)
+{
+ printk(KERN_ERR "CRED: Invalid credentials\n");
+ printk(KERN_ERR "CRED: At %s:%u\n", file, line);
+ dump_invalid_creds(cred, "Specified", current);
+ BUG();
+}
+EXPORT_SYMBOL(__invalid_creds);
+
+/*
+ * check the credentials on a process
+ */
+void __validate_process_creds(struct task_struct *tsk,
+ const char *file, unsigned line)
+{
+ if (tsk->cred == tsk->real_cred) {
+ if (unlikely(read_cred_subscribers(tsk->cred) < 2 ||
+ creds_are_invalid(tsk->cred)))
+ goto invalid_creds;
+ } else {
+ if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 ||
+ read_cred_subscribers(tsk->cred) < 1 ||
+ creds_are_invalid(tsk->real_cred) ||
+ creds_are_invalid(tsk->cred)))
+ goto invalid_creds;
+ }
+ return;
+
+invalid_creds:
+ printk(KERN_ERR "CRED: Invalid process credentials\n");
+ printk(KERN_ERR "CRED: At %s:%u\n", file, line);
+
+ dump_invalid_creds(tsk->real_cred, "Real", tsk);
+ if (tsk->cred != tsk->real_cred)
+ dump_invalid_creds(tsk->cred, "Effective", tsk);
+ else
+ printk(KERN_ERR "CRED: Effective creds == Real creds\n");
+ BUG();
+}
+EXPORT_SYMBOL(__validate_process_creds);
+
+/*
+ * check creds for do_exit()
+ */
+void validate_creds_for_do_exit(struct task_struct *tsk)
+{
+ kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
+ tsk->real_cred, tsk->cred,
+ atomic_read(&tsk->cred->usage),
+ read_cred_subscribers(tsk->cred));
+
+ __validate_process_creds(tsk, __FILE__, __LINE__);
+}
+
+#endif /* CONFIG_DEBUG_CREDENTIALS */
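The validation hooks added above (validate_creds() and the subscriber counts) police the normal credential-modification sequence. As a reminder, that sequence (sketched below with a hypothetical policy check, not code from this patch) is prepare, modify, then commit or abort:

    static int set_task_fsuid(uid_t fsuid)
    {
        struct cred *new;

        new = prepare_creds();              /* private copy of current->cred */
        if (!new)
            return -ENOMEM;

        new->fsuid = fsuid;                 /* modify only the private copy */

        if (!fsuid_allowed(fsuid)) {        /* hypothetical policy check */
            abort_creds(new);               /* drop the never-committed copy */
            return -EPERM;
        }

        return commit_creds(new);           /* install as real + effective creds */
    }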
diff --git a/kernel/exit.c b/kernel/exit.c
index 263f95ed7201..ae5d8660ddff 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -901,6 +901,8 @@ NORET_TYPE void do_exit(long code)
tracehook_report_exit(&code);
+ validate_creds_for_do_exit(tsk);
+
/*
* We're taking recursive faults here in do_exit. Safest is to just
* leave this task alone and wait for reboot.
@@ -1009,6 +1011,8 @@ NORET_TYPE void do_exit(long code)
if (tsk->splice_pipe)
__free_pipe_info(tsk->splice_pipe);
+ validate_creds_for_do_exit(tsk);
+
preempt_disable();
exit_rcu();
/* causes final put_task_struct in finish_task_switch(). */
diff --git a/kernel/fork.c b/kernel/fork.c
index 637520ca0386..bfee931ee3fb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -152,8 +152,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
- put_cred(tsk->real_cred);
- put_cred(tsk->cred);
+ exit_creds(tsk);
delayacct_tsk_free(tsk);
if (!profile_handoff_task(tsk))
@@ -1294,8 +1293,7 @@ bad_fork_cleanup_put_domain:
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
- put_cred(p->real_cred);
- put_cred(p->cred);
+ exit_creds(p);
bad_fork_free:
free_task(p);
fork_out:
diff --git a/kernel/futex.c b/kernel/futex.c
index e18cfbdc7190..248dd119a86e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -115,6 +115,9 @@ struct futex_q {
/* rt_waiter storage for requeue_pi: */
struct rt_mutex_waiter *rt_waiter;
+ /* The expected requeue pi target futex key: */
+ union futex_key *requeue_pi_key;
+
/* Bitset for the optional bitmasked wakeup */
u32 bitset;
};
@@ -1089,6 +1092,10 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
if (!top_waiter)
return 0;
+ /* Ensure we requeue to the expected futex. */
+ if (!match_futex(top_waiter->requeue_pi_key, key2))
+ return -EINVAL;
+
/*
* Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
* the contended case or if set_waiters is 1. The pi_state is returned
@@ -1276,6 +1283,12 @@ retry_private:
continue;
}
+ /* Ensure we requeue to the expected futex for requeue_pi. */
+ if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
+ ret = -EINVAL;
+ break;
+ }
+
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
@@ -1751,6 +1764,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
q.pi_state = NULL;
q.bitset = bitset;
q.rt_waiter = NULL;
+ q.requeue_pi_key = NULL;
if (abs_time) {
to = &timeout;
@@ -1858,6 +1872,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
q.pi_state = NULL;
q.rt_waiter = NULL;
+ q.requeue_pi_key = NULL;
retry:
q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
@@ -2118,11 +2133,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* We call schedule in futex_wait_queue_me() when we enqueue and return there
* via the following:
* 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
- * 2) wakeup on uaddr2 after a requeue and subsequent unlock
- * 3) signal (before or after requeue)
- * 4) timeout (before or after requeue)
+ * 2) wakeup on uaddr2 after a requeue
+ * 3) signal
+ * 4) timeout
*
- * If 3, we setup a restart_block with futex_wait_requeue_pi() as the function.
+ * If 3, cleanup and return -ERESTARTNOINTR.
*
* If 2, we may then block on trying to take the rt_mutex and return via:
* 5) successful lock
@@ -2130,7 +2145,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* 7) timeout
* 8) other lock acquisition failure
*
- * If 6, we setup a restart_block with futex_lock_pi() as the function.
+ * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
*
* If 4 or 7, we cleanup and return with -ETIMEDOUT.
*
@@ -2169,15 +2184,16 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
- q.pi_state = NULL;
- q.bitset = bitset;
- q.rt_waiter = &rt_waiter;
-
key2 = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
+ q.pi_state = NULL;
+ q.bitset = bitset;
+ q.rt_waiter = &rt_waiter;
+ q.requeue_pi_key = &key2;
+
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
@@ -2248,14 +2264,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
- * We've already been requeued, but we have no way to
- * restart by calling futex_lock_pi() directly. We
- * could restart the syscall, but that will look at
- * the user space value and return right away. So we
- * drop back with EWOULDBLOCK to tell user space that
- * "val" has been changed. That's the same what the
- * restart of the syscall would do in
- * futex_wait_setup().
+ * We've already been requeued, but cannot restart by calling
+ * futex_lock_pi() directly. We could restart this syscall, but
+ * it would detect that the user space "val" changed and return
+ * -EWOULDBLOCK. Save the overhead of the restart and return
+ * -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
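The requeue_pi_key checks added above make the requeue side verify that a waiter is only ever moved to the futex it actually asked to be requeued to, returning -EINVAL otherwise. A simplified, self-contained illustration of that comparison (hypothetical key type, not the kernel's futex_key layout):

    #include <errno.h>

    struct fkey {
        unsigned long ptr;
        unsigned long word;
        unsigned long offset;
    };

    static int keys_match(const struct fkey *k1, const struct fkey *k2)
    {
        return k1 && k2 &&
               k1->ptr == k2->ptr &&
               k1->word == k2->word &&
               k1->offset == k2->offset;
    }

    /* requeue side: refuse to move a waiter onto a futex it never asked for */
    static int check_requeue_target(const struct fkey *expected, const struct fkey *actual)
    {
        return keys_match(expected, actual) ? 0 : -EINVAL;
    }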
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 385c31a1bdbf..4e8cae2e9148 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -78,6 +78,10 @@ int __request_module(bool wait, const char *fmt, ...)
#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
static int kmod_loop_msg;
+ ret = security_kernel_module_request();
+ if (ret)
+ return ret;
+
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args);
@@ -462,6 +466,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
int retval = 0;
BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
+ validate_creds(sub_info->cred);
helper_lock();
if (sub_info->path[0] == '\0')
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef996c76..f74d2d7aa605 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
+#include <linux/bitops.h>
#include <asm/sections.h>
@@ -366,11 +367,21 @@ static int save_trace(struct stack_trace *trace)
save_stack_trace(trace);
+ /*
+ * Some daft arches put -1 at the end to indicate it's a full trace.
+ *
+ * <rant> this is buggy anyway, since it takes a whole extra entry so a
+ * complete trace that maxes out the entries provided will be reported
+ * as incomplete, friggin useless </rant>
+ */
+ if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+ trace->nr_entries--;
+
trace->max_entries = trace->nr_entries;
nr_stack_trace_entries += trace->nr_entries;
- if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
+ if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
if (!debug_locks_off_graph_unlock())
return 0;
@@ -388,20 +399,6 @@ unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
-unsigned int max_recursion_depth;
-
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
- unsigned int depth)
-{
- if (!depth)
- lockdep_dependency_gen_id++;
- if (source->dep_gen_id == lockdep_dependency_gen_id)
- return true;
- source->dep_gen_id = lockdep_dependency_gen_id;
- return false;
-}
#ifdef CONFIG_DEBUG_LOCKDEP
/*
@@ -431,11 +428,8 @@ atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
-atomic_t nr_cyclic_check_recursions;
atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
-atomic_t nr_find_usage_backwards_recursions;
#endif
/*
@@ -551,58 +545,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
}
}
-static void print_lock_class_header(struct lock_class *class, int depth)
-{
- int bit;
-
- printk("%*s->", depth, "");
- print_lock_name(class);
- printk(" ops: %lu", class->ops);
- printk(" {\n");
-
- for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
- if (class->usage_mask & (1 << bit)) {
- int len = depth;
-
- len += printk("%*s %s", depth, "", usage_str[bit]);
- len += printk(" at:\n");
- print_stack_trace(class->usage_traces + bit, len);
- }
- }
- printk("%*s }\n", depth, "");
-
- printk("%*s ... key at: ",depth,"");
- print_ip_sym((unsigned long)class->key);
-}
-
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void __used
-print_lock_dependencies(struct lock_class *class, int depth)
-{
- struct lock_list *entry;
-
- if (lockdep_dependency_visit(class, depth))
- return;
-
- if (DEBUG_LOCKS_WARN_ON(depth >= 20))
- return;
-
- print_lock_class_header(class, depth);
-
- list_for_each_entry(entry, &class->locks_after, entry) {
- if (DEBUG_LOCKS_WARN_ON(!entry->class))
- return;
-
- print_lock_dependencies(entry->class, depth + 1);
-
- printk("%*s ... acquired at:\n",depth,"");
- print_stack_trace(&entry->trace, 2);
- printk("\n");
- }
-}
-
static void print_kernel_version(void)
{
printk("%s %.*s\n", init_utsname()->release,
@@ -898,22 +840,203 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
}
/*
+ * Keep the queue size a power of 2 so that index wrap-around only needs a cheap mask (CQ_MASK) instead of a modulo.
+ */
+#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
+#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/*
+ * The circular_queue and its helpers are used to implement the
+ * breadth-first search (BFS) algorithm, by which we can build
+ * the shortest path from the next lock to be acquired back to a
+ * previously held lock when a circular dependency exists between them.
+ */
+struct circular_queue {
+ unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+ unsigned int front, rear;
+};
+
+static struct circular_queue lock_cq;
+
+unsigned int max_bfs_queue_depth;
+
+static unsigned int lockdep_dependency_gen_id;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+ cq->front = cq->rear = 0;
+ lockdep_dependency_gen_id++;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+ return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+ return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+ if (__cq_full(cq))
+ return -1;
+
+ cq->element[cq->rear] = elem;
+ cq->rear = (cq->rear + 1) & CQ_MASK;
+ return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+ if (__cq_empty(cq))
+ return -1;
+
+ *elem = cq->element[cq->front];
+ cq->front = (cq->front + 1) & CQ_MASK;
+ return 0;
+}
+
+static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
+{
+ return (cq->rear - cq->front) & CQ_MASK;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+ struct lock_list *parent)
+{
+ unsigned long nr;
+
+ nr = lock - list_entries;
+ WARN_ON(nr >= nr_list_entries);
+ lock->parent = parent;
+ lock->class->dep_gen_id = lockdep_dependency_gen_id;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+ unsigned long nr;
+
+ nr = lock - list_entries;
+ WARN_ON(nr >= nr_list_entries);
+ return lock->class->dep_gen_id == lockdep_dependency_gen_id;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+ return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+ int depth = 0;
+ struct lock_list *parent;
+
+ while ((parent = get_lock_parent(child))) {
+ child = parent;
+ depth++;
+ }
+ return depth;
+}
+
+static int __bfs(struct lock_list *source_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry,
+ int forward)
+{
+ struct lock_list *entry;
+ struct list_head *head;
+ struct circular_queue *cq = &lock_cq;
+ int ret = 1;
+
+ if (match(source_entry, data)) {
+ *target_entry = source_entry;
+ ret = 0;
+ goto exit;
+ }
+
+ if (forward)
+ head = &source_entry->class->locks_after;
+ else
+ head = &source_entry->class->locks_before;
+
+ if (list_empty(head))
+ goto exit;
+
+ __cq_init(cq);
+ __cq_enqueue(cq, (unsigned long)source_entry);
+
+ while (!__cq_empty(cq)) {
+ struct lock_list *lock;
+
+ __cq_dequeue(cq, (unsigned long *)&lock);
+
+ if (!lock->class) {
+ ret = -2;
+ goto exit;
+ }
+
+ if (forward)
+ head = &lock->class->locks_after;
+ else
+ head = &lock->class->locks_before;
+
+ list_for_each_entry(entry, head, entry) {
+ if (!lock_accessed(entry)) {
+ unsigned int cq_depth;
+ mark_lock_accessed(entry, lock);
+ if (match(entry, data)) {
+ *target_entry = entry;
+ ret = 0;
+ goto exit;
+ }
+
+ if (__cq_enqueue(cq, (unsigned long)entry)) {
+ ret = -1;
+ goto exit;
+ }
+ cq_depth = __cq_get_elem_count(cq);
+ if (max_bfs_queue_depth < cq_depth)
+ max_bfs_queue_depth = cq_depth;
+ }
+ }
+ }
+exit:
+ return ret;
+}
+
+static inline int __bfs_forwards(struct lock_list *src_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry)
+{
+ return __bfs(src_entry, data, match, target_entry, 1);
+
+}
+
+static inline int __bfs_backwards(struct lock_list *src_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry)
+{
+ return __bfs(src_entry, data, match, target_entry, 0);
+
+}
+
+/*
* Recursive, forwards-direction lock-dependency checking, used for
* both noncyclic checking and for hardirq-unsafe/softirq-unsafe
* checking.
- *
- * (to keep the stackframe of the recursive functions small we
- * use these global variables, and we also mark various helper
- * functions as noinline.)
*/
-static struct held_lock *check_source, *check_target;
/*
* Print a dependency chain entry (this is only done when a deadlock
* has been detected):
*/
static noinline int
-print_circular_bug_entry(struct lock_list *target, unsigned int depth)
+print_circular_bug_entry(struct lock_list *target, int depth)
{
if (debug_locks_silent)
return 0;
@@ -930,11 +1053,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
* header first:
*/
static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
- if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+ if (debug_locks_silent)
return 0;
printk("\n=======================================================\n");
@@ -943,9 +1068,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
printk( "-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
- print_lock(check_source);
+ print_lock(check_src);
printk("\nbut task is already holding lock:\n");
- print_lock(check_target);
+ print_lock(check_tgt);
printk("\nwhich lock already depends on the new lock.\n\n");
printk("\nthe existing dependency chain (in reverse order) is:\n");
@@ -954,19 +1079,36 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
return 0;
}
-static noinline int print_circular_bug_tail(void)
+static inline int class_equal(struct lock_list *entry, void *data)
+{
+ return entry->class == data;
+}
+
+static noinline int print_circular_bug(struct lock_list *this,
+ struct lock_list *target,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
- struct lock_list this;
+ struct lock_list *parent;
+ int depth;
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
- this.class = hlock_class(check_source);
- if (!save_trace(&this.trace))
+ if (!save_trace(&this->trace))
return 0;
- print_circular_bug_entry(&this, 0);
+ depth = get_lock_depth(target);
+
+ print_circular_bug_header(target, depth, check_src, check_tgt);
+
+ parent = get_lock_parent(target);
+
+ while (parent) {
+ print_circular_bug_entry(parent, --depth);
+ parent = get_lock_parent(parent);
+ }
printk("\nother info that might help us debug this:\n\n");
lockdep_print_held_locks(curr);
@@ -977,73 +1119,69 @@ static noinline int print_circular_bug_tail(void)
return 0;
}
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
+static noinline int print_bfs_bug(int ret)
{
if (!debug_locks_off_graph_unlock())
return 0;
- WARN_ON(1);
+ WARN(1, "lockdep bfs error:%d\n", ret);
return 0;
}
-unsigned long __lockdep_count_forward_deps(struct lock_class *class,
- unsigned int depth)
+static int noop_count(struct lock_list *entry, void *data)
{
- struct lock_list *entry;
- unsigned long ret = 1;
+ (*(unsigned long *)data)++;
+ return 0;
+}
- if (lockdep_dependency_visit(class, depth))
- return 0;
+unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+{
+ unsigned long count = 0;
+ struct lock_list *uninitialized_var(target_entry);
- /*
- * Recurse this class's dependency list:
- */
- list_for_each_entry(entry, &class->locks_after, entry)
- ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+ __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
- return ret;
+ return count;
}
-
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
unsigned long ret, flags;
+ struct lock_list this;
+
+ this.parent = NULL;
+ this.class = class;
local_irq_save(flags);
__raw_spin_lock(&lockdep_lock);
- ret = __lockdep_count_forward_deps(class, 0);
+ ret = __lockdep_count_forward_deps(&this);
__raw_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
}
-unsigned long __lockdep_count_backward_deps(struct lock_class *class,
- unsigned int depth)
+unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
- struct lock_list *entry;
- unsigned long ret = 1;
+ unsigned long count = 0;
+ struct lock_list *uninitialized_var(target_entry);
- if (lockdep_dependency_visit(class, depth))
- return 0;
- /*
- * Recurse this class's dependency list:
- */
- list_for_each_entry(entry, &class->locks_before, entry)
- ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+ __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
- return ret;
+ return count;
}
unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
unsigned long ret, flags;
+ struct lock_list this;
+
+ this.parent = NULL;
+ this.class = class;
local_irq_save(flags);
__raw_spin_lock(&lockdep_lock);
- ret = __lockdep_count_backward_deps(class, 0);
+ ret = __lockdep_count_backward_deps(&this);
__raw_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
@@ -1055,29 +1193,16 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
* lead to <target>. Print an error and return 0 if it does.
*/
static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
+ int result;
- if (lockdep_dependency_visit(source, depth))
- return 1;
+ debug_atomic_inc(&nr_cyclic_checks);
- debug_atomic_inc(&nr_cyclic_check_recursions);
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_after, entry) {
- if (entry->class == hlock_class(check_target))
- return print_circular_bug_header(entry, depth+1);
- debug_atomic_inc(&nr_cyclic_checks);
- if (!check_noncircular(entry->class, depth+1))
- return print_circular_bug_entry(entry, depth+1);
- }
- return 1;
+ result = __bfs_forwards(root, target, class_equal, target_entry);
+
+ return result;
}
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
@@ -1086,103 +1211,121 @@ check_noncircular(struct lock_class *source, unsigned int depth)
* proving that two subgraphs can be connected by a new dependency
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
-static enum lock_usage_bit find_usage_bit;
-static struct lock_class *forwards_match, *backwards_match;
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+ return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
+
/*
* Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
*
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
*
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
*/
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
- int ret;
-
- if (lockdep_dependency_visit(source, depth))
- return 1;
-
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
+ int result;
debug_atomic_inc(&nr_find_usage_forwards_checks);
- if (source->usage_mask & (1 << find_usage_bit)) {
- forwards_match = source;
- return 2;
- }
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_after, entry) {
- debug_atomic_inc(&nr_find_usage_forwards_recursions);
- ret = find_usage_forwards(entry->class, depth+1);
- if (ret == 2 || ret == 0)
- return ret;
- }
- return 1;
+ result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+ return result;
}
/*
* Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
*
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
*
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
*/
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
- int ret;
+ int result;
- if (lockdep_dependency_visit(source, depth))
- return 1;
+ debug_atomic_inc(&nr_find_usage_backwards_checks);
- if (!__raw_spin_is_locked(&lockdep_lock))
- return DEBUG_LOCKS_WARN_ON(1);
+ result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
+ return result;
+}
- debug_atomic_inc(&nr_find_usage_backwards_checks);
- if (source->usage_mask & (1 << find_usage_bit)) {
- backwards_match = source;
- return 2;
- }
+static void print_lock_class_header(struct lock_class *class, int depth)
+{
+ int bit;
- if (!source && debug_locks_off_graph_unlock()) {
- WARN_ON(1);
- return 0;
- }
+ printk("%*s->", depth, "");
+ print_lock_name(class);
+ printk(" ops: %lu", class->ops);
+ printk(" {\n");
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_before, entry) {
- debug_atomic_inc(&nr_find_usage_backwards_recursions);
- ret = find_usage_backwards(entry->class, depth+1);
- if (ret == 2 || ret == 0)
- return ret;
+ for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+ if (class->usage_mask & (1 << bit)) {
+ int len = depth;
+
+ len += printk("%*s %s", depth, "", usage_str[bit]);
+ len += printk(" at:\n");
+ print_stack_trace(class->usage_traces + bit, len);
+ }
}
- return 1;
+ printk("%*s }\n", depth, "");
+
+ printk("%*s ... key at: ",depth,"");
+ print_ip_sym((unsigned long)class->key);
+}
+
+/*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+ struct lock_list *root)
+{
+ struct lock_list *entry = leaf;
+ int depth;
+
+ /* compute the depth from the tree generated by BFS */
+ depth = get_lock_depth(leaf);
+
+ do {
+ print_lock_class_header(entry->class, depth);
+ printk("%*s ... acquired at:\n", depth, "");
+ print_stack_trace(&entry->trace, 2);
+ printk("\n");
+
+ if (depth == 0 && (entry != root)) {
+ printk("lockdep:%s bad BFS generated tree\n", __func__);
+ break;
+ }
+
+ entry = get_lock_parent(entry);
+ depth--;
+ } while (entry && (depth >= 0));
+
+ return;
}
static int
print_bad_irq_dependency(struct task_struct *curr,
+ struct lock_list *prev_root,
+ struct lock_list *next_root,
+ struct lock_list *backwards_entry,
+ struct lock_list *forwards_entry,
struct held_lock *prev,
struct held_lock *next,
enum lock_usage_bit bit1,
@@ -1215,26 +1358,32 @@ print_bad_irq_dependency(struct task_struct *curr,
printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
irqclass);
- print_lock_name(backwards_match);
+ print_lock_name(backwards_entry->class);
printk("\n... which became %s-irq-safe at:\n", irqclass);
- print_stack_trace(backwards_match->usage_traces + bit1, 1);
+ print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
printk("\nto a %s-irq-unsafe lock:\n", irqclass);
- print_lock_name(forwards_match);
+ print_lock_name(forwards_entry->class);
printk("\n... which became %s-irq-unsafe at:\n", irqclass);
printk("...");
- print_stack_trace(forwards_match->usage_traces + bit2, 1);
+ print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
printk("\nother info that might help us debug this:\n\n");
lockdep_print_held_locks(curr);
- printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
- print_lock_dependencies(backwards_match, 0);
+ printk("\nthe dependencies between %s-irq-safe lock", irqclass);
+ printk(" and the holding lock:\n");
+ if (!save_trace(&prev_root->trace))
+ return 0;
+ print_shortest_lock_dependencies(backwards_entry, prev_root);
- printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
- print_lock_dependencies(forwards_match, 0);
+ printk("\nthe dependencies between the lock to be acquired");
+ printk(" and %s-irq-unsafe lock:\n", irqclass);
+ if (!save_trace(&next_root->trace))
+ return 0;
+ print_shortest_lock_dependencies(forwards_entry, next_root);
printk("\nstack backtrace:\n");
dump_stack();
@@ -1248,19 +1397,30 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
enum lock_usage_bit bit_forwards, const char *irqclass)
{
int ret;
+ struct lock_list this, that;
+ struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *uninitialized_var(target_entry1);
- find_usage_bit = bit_backwards;
- /* fills in <backwards_match> */
- ret = find_usage_backwards(hlock_class(prev), 0);
- if (!ret || ret == 1)
+ this.parent = NULL;
+
+ this.class = hlock_class(prev);
+ ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- find_usage_bit = bit_forwards;
- ret = find_usage_forwards(hlock_class(next), 0);
- if (!ret || ret == 1)
+ that.parent = NULL;
+ that.class = hlock_class(next);
+ ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- /* ret == 2 */
- return print_bad_irq_dependency(curr, prev, next,
+
+ return print_bad_irq_dependency(curr, &this, &that,
+ target_entry, target_entry1,
+ prev, next,
bit_backwards, bit_forwards, irqclass);
}
@@ -1472,6 +1632,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
{
struct lock_list *entry;
int ret;
+ struct lock_list this;
+ struct lock_list *uninitialized_var(target_entry);
/*
* Prove that the new <prev> -> <next> dependency would not
@@ -1482,10 +1644,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* We are using global variables to control the recursion, to
* keep the stackframe size of the recursive functions low:
*/
- check_source = next;
- check_target = prev;
- if (!(check_noncircular(hlock_class(next), 0)))
- return print_circular_bug_tail();
+ this.class = hlock_class(next);
+ this.parent = NULL;
+ ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+ if (unlikely(!ret))
+ return print_circular_bug(&this, target_entry, next, prev);
+ else if (unlikely(ret < 0))
+ return print_bfs_bug(ret);
if (!check_prev_add_irq(curr, prev, next))
return 0;
@@ -1884,7 +2049,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
* print irq inversion bug:
*/
static int
-print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
+print_irq_inversion_bug(struct task_struct *curr,
+ struct lock_list *root, struct lock_list *other,
struct held_lock *this, int forwards,
const char *irqclass)
{
@@ -1902,17 +2068,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
else
printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
- print_lock_name(other);
+ print_lock_name(other->class);
printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
- printk("\nthe first lock's dependencies:\n");
- print_lock_dependencies(hlock_class(this), 0);
-
- printk("\nthe second lock's dependencies:\n");
- print_lock_dependencies(other, 0);
+ printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+ if (!save_trace(&root->trace))
+ return 0;
+ print_shortest_lock_dependencies(other, root);
printk("\nstack backtrace:\n");
dump_stack();
@@ -1929,14 +2094,19 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit, const char *irqclass)
{
int ret;
-
- find_usage_bit = bit;
- /* fills in <forwards_match> */
- ret = find_usage_forwards(hlock_class(this), 0);
- if (!ret || ret == 1)
+ struct lock_list root;
+ struct lock_list *uninitialized_var(target_entry);
+
+ root.parent = NULL;
+ root.class = hlock_class(this);
+ ret = find_usage_forwards(&root, bit, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+ return print_irq_inversion_bug(curr, &root, target_entry,
+ this, 1, irqclass);
}
/*
@@ -1948,14 +2118,19 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit, const char *irqclass)
{
int ret;
-
- find_usage_bit = bit;
- /* fills in <backwards_match> */
- ret = find_usage_backwards(hlock_class(this), 0);
- if (!ret || ret == 1)
+ struct lock_list root;
+ struct lock_list *uninitialized_var(target_entry);
+
+ root.parent = NULL;
+ root.class = hlock_class(this);
+ ret = find_usage_backwards(&root, bit, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+ return print_irq_inversion_bug(curr, &root, target_entry,
+ this, 0, irqclass);
}
void print_irqtrace_events(struct task_struct *curr)
@@ -2530,13 +2705,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
- struct lockdep_map *nest_lock, unsigned long ip)
+ struct lockdep_map *nest_lock, unsigned long ip,
+ int references)
{
struct task_struct *curr = current;
struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth, id;
int chain_head = 0;
+ int class_idx;
u64 chain_key;
if (!prove_locking)
@@ -2584,10 +2761,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
+ class_idx = class - lock_classes + 1;
+
+ if (depth) {
+ hlock = curr->held_locks + depth - 1;
+ if (hlock->class_idx == class_idx && nest_lock) {
+ if (hlock->references)
+ hlock->references++;
+ else
+ hlock->references = 2;
+
+ return 1;
+ }
+ }
+
hlock = curr->held_locks + depth;
if (DEBUG_LOCKS_WARN_ON(!class))
return 0;
- hlock->class_idx = class - lock_classes + 1;
+ hlock->class_idx = class_idx;
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->nest_lock = nest_lock;
@@ -2595,6 +2786,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->read = read;
hlock->check = check;
hlock->hardirqs_off = !!hardirqs_off;
+ hlock->references = references;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
hlock->holdtime_stamp = sched_clock();
@@ -2703,6 +2895,30 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
return 1;
}
+static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+{
+ if (hlock->instance == lock)
+ return 1;
+
+ if (hlock->references) {
+ struct lock_class *class = lock->class_cache;
+
+ if (!class)
+ class = look_up_lock_class(lock, 0);
+
+ if (DEBUG_LOCKS_WARN_ON(!class))
+ return 0;
+
+ if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
+ return 0;
+
+ if (hlock->class_idx == class - lock_classes + 1)
+ return 1;
+ }
+
+ return 0;
+}
+
static int
__lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
@@ -2726,7 +2942,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -2745,7 +2961,8 @@ found_it:
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->nest_lock, hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip,
+ hlock->references))
return 0;
}
@@ -2784,20 +3001,34 @@ lock_release_non_nested(struct task_struct *curr,
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
- lock_release_holdtime(hlock);
+ if (hlock->instance == lock)
+ lock_release_holdtime(hlock);
+
+ if (hlock->references) {
+ hlock->references--;
+ if (hlock->references) {
+ /*
+ * We had, and after removing one, still have
+ * references, the current lock stack is still
+ * valid. We're done!
+ */
+ return 1;
+ }
+ }
/*
* We have the right lock to unlock, 'hlock' points to it.
* Now we remove it from the stack, and add back the other
* entries (if any), recalculating the hash along the way:
*/
+
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
@@ -2806,7 +3037,8 @@ found_it:
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->nest_lock, hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip,
+ hlock->references))
return 0;
}
@@ -2836,7 +3068,7 @@ static int lock_release_nested(struct task_struct *curr,
/*
* Is the unlock non-nested:
*/
- if (hlock->instance != lock)
+ if (hlock->instance != lock || hlock->references)
return lock_release_non_nested(curr, lock, ip);
curr->lockdep_depth--;
@@ -2881,6 +3113,21 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
check_chain_key(curr);
}
+static int __lock_is_held(struct lockdep_map *lock)
+{
+ struct task_struct *curr = current;
+ int i;
+
+ for (i = 0; i < curr->lockdep_depth; i++) {
+ struct held_lock *hlock = curr->held_locks + i;
+
+ if (match_held_lock(hlock, lock))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* Check whether we follow the irq-flags state precisely:
*/
@@ -2957,7 +3204,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 1;
__lock_acquire(lock, subclass, trylock, read, check,
- irqs_disabled_flags(flags), nest_lock, ip);
+ irqs_disabled_flags(flags), nest_lock, ip, 0);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
@@ -2982,6 +3229,26 @@ void lock_release(struct lockdep_map *lock, int nested,
}
EXPORT_SYMBOL_GPL(lock_release);
+int lock_is_held(struct lockdep_map *lock)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (unlikely(current->lockdep_recursion))
+ return ret;
+
+ raw_local_irq_save(flags);
+ check_flags(flags);
+
+ current->lockdep_recursion = 1;
+ ret = __lock_is_held(lock);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lock_is_held);
+
void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
{
current->lockdep_reclaim_gfp = gfp_mask;
@@ -3041,7 +3308,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -3049,6 +3316,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
return;
found_it:
+ if (hlock->instance != lock)
+ return;
+
hlock->waittime_stamp = sched_clock();
contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
@@ -3088,7 +3358,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -3096,6 +3366,9 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
return;
found_it:
+ if (hlock->instance != lock)
+ return;
+
cpu = smp_processor_id();
if (hlock->waittime_stamp) {
now = sched_clock();
@@ -3326,7 +3599,12 @@ void __init lockdep_info(void)
sizeof(struct list_head) * CLASSHASH_SIZE +
sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
- sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
+ sizeof(struct list_head) * CHAINHASH_SIZE
+#ifdef CONFIG_PROVE_LOCKING
+ + sizeof(struct circular_queue)
+#endif
+ ) / 1024
+ );
printk(" per task-struct memory footprint: %lu bytes\n",
sizeof(struct held_lock) * MAX_LOCK_DEPTH);
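The BFS rework above replaces the recursive graph walks with an iterative search driven by a fixed-size circular queue whose size is a power of two, so that the front/rear indices wrap with a single mask operation. A small stand-alone sketch of that ring-buffer technique (plain C, independent of the lockdep types):

    #include <stdio.h>

    #define QSIZE 8UL            /* power of two, like MAX_CIRCULAR_QUEUE_SIZE */
    #define QMASK (QSIZE - 1)    /* wrap indices with a mask, no modulo needed */

    struct cq {
        unsigned long elem[QSIZE];
        unsigned int front, rear;
    };

    static int cq_enqueue(struct cq *q, unsigned long v)
    {
        if (((q->rear + 1) & QMASK) == q->front)
            return -1;                       /* full */
        q->elem[q->rear] = v;
        q->rear = (q->rear + 1) & QMASK;
        return 0;
    }

    static int cq_dequeue(struct cq *q, unsigned long *v)
    {
        if (q->front == q->rear)
            return -1;                       /* empty */
        *v = q->elem[q->front];
        q->front = (q->front + 1) & QMASK;
        return 0;
    }

    int main(void)
    {
        struct cq q = { .front = 0, .rear = 0 };
        unsigned long v;

        for (v = 0; v < 5; v++)
            cq_enqueue(&q, v);
        while (cq_dequeue(&q, &v) == 0)
            printf("%lu\n", v);              /* 0..4 in FIFO order */
        return 0;
    }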
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac3a0d7..a2ee95ad1313 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;
+extern unsigned int max_bfs_queue_depth;
+
#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index e94caa666dba..d4b3dbc79fdb 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -25,38 +25,12 @@
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct lock_class *class;
-
- (*pos)++;
-
- if (v == SEQ_START_TOKEN)
- class = m->private;
- else {
- class = v;
-
- if (class->lock_entry.next != &all_lock_classes)
- class = list_entry(class->lock_entry.next,
- struct lock_class, lock_entry);
- else
- class = NULL;
- }
-
- return class;
+ return seq_list_next(v, &all_lock_classes, pos);
}
static void *l_start(struct seq_file *m, loff_t *pos)
{
- struct lock_class *class;
- loff_t i = 0;
-
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- list_for_each_entry(class, &all_lock_classes, lock_entry) {
- if (++i == *pos)
- return class;
- }
- return NULL;
+ return seq_list_start_head(&all_lock_classes, *pos);
}
static void l_stop(struct seq_file *m, void *v)
@@ -82,11 +56,11 @@ static void print_name(struct seq_file *m, struct lock_class *class)
static int l_show(struct seq_file *m, void *v)
{
- struct lock_class *class = v;
+ struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
struct lock_list *entry;
char usage[LOCK_USAGE_CHARS];
- if (v == SEQ_START_TOKEN) {
+ if (v == &all_lock_classes) {
seq_printf(m, "all lock classes:\n");
return 0;
}
@@ -128,17 +102,7 @@ static const struct seq_operations lockdep_ops = {
static int lockdep_open(struct inode *inode, struct file *file)
{
- int res = seq_open(file, &lockdep_ops);
- if (!res) {
- struct seq_file *m = file->private_data;
-
- if (!list_empty(&all_lock_classes))
- m->private = list_entry(all_lock_classes.next,
- struct lock_class, lock_entry);
- else
- m->private = NULL;
- }
- return res;
+ return seq_open(file, &lockdep_ops);
}
static const struct file_operations proc_lockdep_operations = {
@@ -149,37 +113,23 @@ static const struct file_operations proc_lockdep_operations = {
};
#ifdef CONFIG_PROVE_LOCKING
-static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct lock_chain *chain;
-
- (*pos)++;
-
- if (v == SEQ_START_TOKEN)
- chain = m->private;
- else {
- chain = v;
-
- if (*pos < nr_lock_chains)
- chain = lock_chains + *pos;
- else
- chain = NULL;
- }
-
- return chain;
-}
-
static void *lc_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0)
return SEQ_START_TOKEN;
- if (*pos < nr_lock_chains)
- return lock_chains + *pos;
+ if (*pos - 1 < nr_lock_chains)
+ return lock_chains + (*pos - 1);
return NULL;
}
+static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return lc_start(m, pos);
+}
+
static void lc_stop(struct seq_file *m, void *v)
{
}
@@ -220,16 +170,7 @@ static const struct seq_operations lockdep_chains_ops = {
static int lockdep_chains_open(struct inode *inode, struct file *file)
{
- int res = seq_open(file, &lockdep_chains_ops);
- if (!res) {
- struct seq_file *m = file->private_data;
-
- if (nr_lock_chains)
- m->private = lock_chains;
- else
- m->private = NULL;
- }
- return res;
+ return seq_open(file, &lockdep_chains_ops);
}
static const struct file_operations proc_lockdep_chains_operations = {
@@ -258,16 +199,10 @@ static void lockdep_stats_debug_show(struct seq_file *m)
debug_atomic_read(&chain_lookup_hits));
seq_printf(m, " cyclic checks: %11u\n",
debug_atomic_read(&nr_cyclic_checks));
- seq_printf(m, " cyclic-check recursions: %11u\n",
- debug_atomic_read(&nr_cyclic_check_recursions));
seq_printf(m, " find-mask forwards checks: %11u\n",
debug_atomic_read(&nr_find_usage_forwards_checks));
- seq_printf(m, " find-mask forwards recursions: %11u\n",
- debug_atomic_read(&nr_find_usage_forwards_recursions));
seq_printf(m, " find-mask backwards checks: %11u\n",
debug_atomic_read(&nr_find_usage_backwards_checks));
- seq_printf(m, " find-mask backwards recursions:%11u\n",
- debug_atomic_read(&nr_find_usage_backwards_recursions));
seq_printf(m, " hardirq on events: %11u\n", hi1);
seq_printf(m, " hardirq off events: %11u\n", hi2);
@@ -409,8 +344,10 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
nr_unused);
seq_printf(m, " max locking depth: %11u\n",
max_lockdep_depth);
- seq_printf(m, " max recursion depth: %11u\n",
- max_recursion_depth);
+#ifdef CONFIG_PROVE_LOCKING
+ seq_printf(m, " max bfs queue depth: %11u\n",
+ max_bfs_queue_depth);
+#endif
lockdep_stats_debug_show(m);
seq_printf(m, " debug_locks: %11u\n",
debug_locks);
@@ -438,7 +375,6 @@ struct lock_stat_data {
};
struct lock_stat_seq {
- struct lock_stat_data *iter;
struct lock_stat_data *iter_end;
struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};
@@ -626,34 +562,22 @@ static void seq_header(struct seq_file *m)
static void *ls_start(struct seq_file *m, loff_t *pos)
{
struct lock_stat_seq *data = m->private;
+ struct lock_stat_data *iter;
if (*pos == 0)
return SEQ_START_TOKEN;
- data->iter = data->stats + *pos;
- if (data->iter >= data->iter_end)
- data->iter = NULL;
+ iter = data->stats + (*pos - 1);
+ if (iter >= data->iter_end)
+ iter = NULL;
- return data->iter;
+ return iter;
}
static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct lock_stat_seq *data = m->private;
-
(*pos)++;
-
- if (v == SEQ_START_TOKEN)
- data->iter = data->stats;
- else {
- data->iter = v;
- data->iter++;
- }
-
- if (data->iter == data->iter_end)
- data->iter = NULL;
-
- return data->iter;
+ return ls_start(m, pos);
}
static void ls_stop(struct seq_file *m, void *v)
@@ -691,7 +615,6 @@ static int lock_stat_open(struct inode *inode, struct file *file)
struct lock_stat_data *iter = data->stats;
struct seq_file *m = file->private_data;
- data->iter = iter;
list_for_each_entry(class, &all_lock_classes, lock_entry) {
iter->class = class;
iter->stats = lock_stats(class);
@@ -699,7 +622,7 @@ static int lock_stat_open(struct inode *inode, struct file *file)
}
data->iter_end = iter;
- sort(data->stats, data->iter_end - data->iter,
+ sort(data->stats, data->iter_end - data->stats,
sizeof(struct lock_stat_data),
lock_stat_cmp, NULL);
@@ -734,7 +657,6 @@ static int lock_stat_release(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
vfree(seq->private);
- seq->private = NULL;
return seq_release(inode, file);
}
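Editor's note: the lock_stat change also corrects the element count passed to sort(); with data->iter gone, the number of entries is iter_end - data->stats, i.e. the distance from the start of the array rather than from a cursor. As a hedged reminder of the sort() calling convention from linux/sort.h (the values and names below are made up for illustration):

    #include <linux/kernel.h>
    #include <linux/sort.h>

    static int cmp_ulong(const void *a, const void *b)
    {
            unsigned long x = *(const unsigned long *)a;
            unsigned long y = *(const unsigned long *)b;

            return x < y ? -1 : (x > y ? 1 : 0);
    }

    static void sort_example(void)
    {
            unsigned long v[] = { 3, 1, 2 };

            /* base, number of elements, element size, compare, optional swap */
            sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_ulong, NULL);
    }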
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f274e1959885..d7cbc579fc80 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -50,7 +50,7 @@ static atomic_t nr_task_counters __read_mostly;
* 1 - disallow cpu counters to unpriv
* 2 - disallow kernel profiling to unpriv
*/
-int sysctl_perf_counter_paranoid __read_mostly;
+int sysctl_perf_counter_paranoid __read_mostly = 1;
static inline bool perf_paranoid_cpu(void)
{
@@ -4066,6 +4066,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
hwc->sample_period = attr->sample_period;
if (attr->freq && attr->sample_freq)
hwc->sample_period = 1;
+ hwc->last_period = hwc->sample_period;
atomic64_set(&hwc->period_left, hwc->sample_period);
diff --git a/kernel/printk.c b/kernel/printk.c
index b4d97b54c1ec..e10d193a833a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -37,6 +37,12 @@
#include <asm/uaccess.h>
/*
+ * for_each_console() iterates over every registered console
+ */
+#define for_each_console(con) \
+ for (con = console_drivers; con != NULL; con = con->next)
+
+/*
* Architectures can override it:
*/
void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
@@ -61,6 +67,8 @@ int console_printk[4] = {
DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
};
+static int saved_console_loglevel = -1;
+
/*
* Low level drivers may need that to know if they can schedule in
* their unblank() callback or not. So let's export it.
@@ -372,10 +380,15 @@ int do_syslog(int type, char __user *buf, int len)
logged_chars = 0;
break;
case 6: /* Disable logging to console */
+ if (saved_console_loglevel == -1)
+ saved_console_loglevel = console_loglevel;
console_loglevel = minimum_console_loglevel;
break;
case 7: /* Enable logging to console */
- console_loglevel = default_console_loglevel;
+ if (saved_console_loglevel != -1) {
+ console_loglevel = saved_console_loglevel;
+ saved_console_loglevel = -1;
+ }
break;
case 8: /* Set level of messages printed to console */
error = -EINVAL;
@@ -384,6 +397,8 @@ int do_syslog(int type, char __user *buf, int len)
if (len < minimum_console_loglevel)
len = minimum_console_loglevel;
console_loglevel = len;
+ /* Implicitly re-enable logging to console */
+ saved_console_loglevel = -1;
error = 0;
break;
case 9: /* Number of chars in the log buffer */
@@ -412,7 +427,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
{
struct console *con;
- for (con = console_drivers; con; con = con->next) {
+ for_each_console(con) {
if ((con->flags & CON_ENABLED) && con->write &&
(cpu_online(smp_processor_id()) ||
(con->flags & CON_ANYTIME)))
@@ -544,7 +559,7 @@ static int have_callable_console(void)
{
struct console *con;
- for (con = console_drivers; con; con = con->next)
+ for_each_console(con)
if (con->flags & CON_ANYTIME)
return 1;
@@ -1082,7 +1097,7 @@ void console_unblank(void)
console_locked = 1;
console_may_schedule = 0;
- for (c = console_drivers; c != NULL; c = c->next)
+ for_each_console(c)
if ((c->flags & CON_ENABLED) && c->unblank)
c->unblank();
release_console_sem();
@@ -1097,7 +1112,7 @@ struct tty_driver *console_device(int *index)
struct tty_driver *driver = NULL;
acquire_console_sem();
- for (c = console_drivers; c != NULL; c = c->next) {
+ for_each_console(c) {
if (!c->device)
continue;
driver = c->device(c, index);
@@ -1134,25 +1149,49 @@ EXPORT_SYMBOL(console_start);
* to register the console printing procedure with printk() and to
* print any messages that were printed by the kernel before the
* console driver was initialized.
+ *
+ * This can happen pretty early during the boot process (because of
+ * early_printk), sometimes even before setup_arch() completes; be careful
+ * which kernel features are used, as they may not be initialised yet.
+ *
+ * There are two types of consoles: bootconsoles (early_printk) and
+ * "real" consoles (everything that is not a bootconsole), and they are
+ * handled differently.
+ * - Any number of bootconsoles can be registered at any time.
+ * - As soon as a "real" console is registered, all bootconsoles
+ * will be unregistered automatically.
+ * - Once a "real" console is registered, any attempt to register a
+ * bootconsole will be rejected.
*/
-void register_console(struct console *console)
+void register_console(struct console *newcon)
{
int i;
unsigned long flags;
- struct console *bootconsole = NULL;
+ struct console *bcon = NULL;
- if (console_drivers) {
- if (console->flags & CON_BOOT)
- return;
- if (console_drivers->flags & CON_BOOT)
- bootconsole = console_drivers;
+ /*
+ * Before we register a new CON_BOOT console, make sure we don't
+ * already have a real console registered.
+ */
+ if (console_drivers && newcon->flags & CON_BOOT) {
+ /* find the last or real console */
+ for_each_console(bcon) {
+ if (!(bcon->flags & CON_BOOT)) {
+ printk(KERN_INFO "Too late to register bootconsole %s%d\n",
+ newcon->name, newcon->index);
+ return;
+ }
+ }
}
- if (preferred_console < 0 || bootconsole || !console_drivers)
+ if (console_drivers && console_drivers->flags & CON_BOOT)
+ bcon = console_drivers;
+
+ if (preferred_console < 0 || bcon || !console_drivers)
preferred_console = selected_console;
- if (console->early_setup)
- console->early_setup();
+ if (newcon->early_setup)
+ newcon->early_setup();
/*
* See if we want to use this console driver. If we
@@ -1160,13 +1199,13 @@ void register_console(struct console *console)
* that registers here.
*/
if (preferred_console < 0) {
- if (console->index < 0)
- console->index = 0;
- if (console->setup == NULL ||
- console->setup(console, NULL) == 0) {
- console->flags |= CON_ENABLED;
- if (console->device) {
- console->flags |= CON_CONSDEV;
+ if (newcon->index < 0)
+ newcon->index = 0;
+ if (newcon->setup == NULL ||
+ newcon->setup(newcon, NULL) == 0) {
+ newcon->flags |= CON_ENABLED;
+ if (newcon->device) {
+ newcon->flags |= CON_CONSDEV;
preferred_console = 0;
}
}
@@ -1178,64 +1217,62 @@ void register_console(struct console *console)
*/
for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
i++) {
- if (strcmp(console_cmdline[i].name, console->name) != 0)
+ if (strcmp(console_cmdline[i].name, newcon->name) != 0)
continue;
- if (console->index >= 0 &&
- console->index != console_cmdline[i].index)
+ if (newcon->index >= 0 &&
+ newcon->index != console_cmdline[i].index)
continue;
- if (console->index < 0)
- console->index = console_cmdline[i].index;
+ if (newcon->index < 0)
+ newcon->index = console_cmdline[i].index;
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
if (console_cmdline[i].brl_options) {
- console->flags |= CON_BRL;
- braille_register_console(console,
+ newcon->flags |= CON_BRL;
+ braille_register_console(newcon,
console_cmdline[i].index,
console_cmdline[i].options,
console_cmdline[i].brl_options);
return;
}
#endif
- if (console->setup &&
- console->setup(console, console_cmdline[i].options) != 0)
+ if (newcon->setup &&
+ newcon->setup(newcon, console_cmdline[i].options) != 0)
break;
- console->flags |= CON_ENABLED;
- console->index = console_cmdline[i].index;
+ newcon->flags |= CON_ENABLED;
+ newcon->index = console_cmdline[i].index;
if (i == selected_console) {
- console->flags |= CON_CONSDEV;
+ newcon->flags |= CON_CONSDEV;
preferred_console = selected_console;
}
break;
}
- if (!(console->flags & CON_ENABLED))
+ if (!(newcon->flags & CON_ENABLED))
return;
- if (bootconsole && (console->flags & CON_CONSDEV)) {
- printk(KERN_INFO "console handover: boot [%s%d] -> real [%s%d]\n",
- bootconsole->name, bootconsole->index,
- console->name, console->index);
- unregister_console(bootconsole);
- console->flags &= ~CON_PRINTBUFFER;
- } else {
- printk(KERN_INFO "console [%s%d] enabled\n",
- console->name, console->index);
- }
+ /*
+ * If we have a bootconsole and are switching to a real console,
+ * don't print everything out again: when the boot console and the
+ * real console are the same physical device, it's annoying to see
+ * the early boot messages twice.
+ */
+ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
+ newcon->flags &= ~CON_PRINTBUFFER;
/*
* Put this console in the list - keep the
* preferred driver at the head of the list.
*/
acquire_console_sem();
- if ((console->flags & CON_CONSDEV) || console_drivers == NULL) {
- console->next = console_drivers;
- console_drivers = console;
- if (console->next)
- console->next->flags &= ~CON_CONSDEV;
+ if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
+ newcon->next = console_drivers;
+ console_drivers = newcon;
+ if (newcon->next)
+ newcon->next->flags &= ~CON_CONSDEV;
} else {
- console->next = console_drivers->next;
- console_drivers->next = console;
+ newcon->next = console_drivers->next;
+ console_drivers->next = newcon;
}
- if (console->flags & CON_PRINTBUFFER) {
+ if (newcon->flags & CON_PRINTBUFFER) {
/*
* release_console_sem() will print out the buffered messages
* for us.
@@ -1245,6 +1282,28 @@ void register_console(struct console *console)
spin_unlock_irqrestore(&logbuf_lock, flags);
}
release_console_sem();
+
+ /*
+ * By unregistering the bootconsoles after we enable the real console,
+ * the "console xxx enabled" message is printed on all consoles, boot
+ * and real alike. This ensures that users know there may be messages
+ * in the kernel's log buffer that went only to the bootconsole and
+ * that they do not see on the real console.
+ */
+ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
+ /* We need to iterate through twice to make sure we print
+ * everything out before we unregister the console(s).
+ */
+ printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n",
+ newcon->name, newcon->index);
+ for_each_console(bcon)
+ if (bcon->flags & CON_BOOT)
+ unregister_console(bcon);
+ } else {
+ printk(KERN_INFO "%sconsole [%s%d] enabled\n",
+ (newcon->flags & CON_BOOT) ? "boot" : "" ,
+ newcon->name, newcon->index);
+ }
}
EXPORT_SYMBOL(register_console);
@@ -1287,11 +1346,13 @@ EXPORT_SYMBOL(unregister_console);
static int __init disable_boot_consoles(void)
{
- if (console_drivers != NULL) {
- if (console_drivers->flags & CON_BOOT) {
+ struct console *con;
+
+ for_each_console(con) {
+ if (con->flags & CON_BOOT) {
printk(KERN_INFO "turn off boot console %s%d\n",
- console_drivers->name, console_drivers->index);
- return unregister_console(console_drivers);
+ con->name, con->index);
+ unregister_console(con);
}
}
return 0;
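Editor's note: the do_syslog() changes make "disable logging to console" restorable. Command 6 saves the current console_loglevel (only once), command 7 restores it only if something was actually saved, and setting an explicit level via command 8 drops the saved value. A small self-contained model of that state machine; the wrapper function names are invented for illustration, as the real logic lives inline in do_syslog():

    /* Minimal model of the save/restore rule; the real state lives in printk.c. */
    static int console_loglevel = 7;
    static const int minimum_console_loglevel = 1;
    static int saved_console_loglevel = -1;         /* -1: nothing saved */

    static void console_logging_disable(void)       /* do_syslog() case 6 */
    {
            if (saved_console_loglevel == -1)
                    saved_console_loglevel = console_loglevel;
            console_loglevel = minimum_console_loglevel;
    }

    static void console_logging_enable(void)        /* do_syslog() case 7 */
    {
            if (saved_console_loglevel != -1) {
                    console_loglevel = saved_console_loglevel;
                    saved_console_loglevel = -1;
            }
    }

    static void console_set_level(int level)        /* do_syslog() case 8 */
    {
            if (level < minimum_console_loglevel)
                    level = minimum_console_loglevel;
            console_loglevel = level;
            saved_console_loglevel = -1;    /* an explicit level implicitly re-enables logging */
    }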
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 082c320e4dbf..307c285af59e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -152,7 +152,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
if (!dumpable && !capable(CAP_SYS_PTRACE))
return -EPERM;
- return security_ptrace_may_access(task, mode);
+ return security_ptrace_access_check(task, mode);
}
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 71bc79791cd9..6b11b07cfe7f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -35,6 +35,7 @@
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/nmi.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
@@ -505,6 +506,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
}
printk(" (detected by %d, t=%ld jiffies)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start));
+ trigger_all_cpu_backtrace();
+
force_quiescent_state(rsp, 0); /* Kick them all. */
}
@@ -515,12 +518,14 @@ static void print_cpu_stall(struct rcu_state *rsp)
printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
smp_processor_id(), jiffies - rsp->gp_start);
- dump_stack();
+ trigger_all_cpu_backtrace();
+
spin_lock_irqsave(&rnp->lock, flags);
if ((long)(jiffies - rsp->jiffies_stall) >= 0)
rsp->jiffies_stall =
jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
spin_unlock_irqrestore(&rnp->lock, flags);
+
set_need_resched(); /* kick ourselves to get things going. */
}
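Editor's note: replacing dump_stack() with trigger_all_cpu_backtrace() makes an RCU stall report capture the stacks of all CPUs (via NMI where the architecture supports it; elsewhere the call may be a no-op stub), which is usually what is needed to see who is holding things up. A hedged sketch of the same pattern in an invented watchdog helper, not taken from this patch:

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/nmi.h>

    /* Illustrative only: warn if some operation has been stuck too long. */
    static void check_for_stall(unsigned long start, unsigned long timeout)
    {
            if (time_after(jiffies, start + timeout)) {
                    pr_err("operation appears stalled for %lu jiffies\n",
                           jiffies - start);
                    trigger_all_cpu_backtrace();    /* dump every CPU, not just this one */
            }
    }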
diff --git a/kernel/sched.c b/kernel/sched.c
index c9beca67a53e..4066241ae9f4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6609,6 +6609,8 @@ int cond_resched_lock(spinlock_t *lock)
int resched = should_resched();
int ret = 0;
+ lockdep_assert_held(lock);
+
if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
if (resched)
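Editor's note: the new lockdep_assert_held() in cond_resched_lock() documents and enforces the calling convention that the lock must already be held on entry; under CONFIG_LOCKDEP the macro warns if it is not, and otherwise it compiles away. A minimal sketch of the same convention on an invented helper (names are not from this patch; the spinlock is assumed to have been set up with spin_lock_init()):

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    struct counter {
            spinlock_t lock;
            unsigned long value;
    };

    /* Caller must hold c->lock; lockdep verifies that when enabled. */
    static void counter_add_locked(struct counter *c, unsigned long n)
    {
            lockdep_assert_held(&c->lock);
            c->value += n;
    }

    static void counter_add(struct counter *c, unsigned long n)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);
            counter_add_locked(c, n);
            spin_unlock_irqrestore(&c->lock, flags);
    }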
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 7932653c4ebd..5ddab730cb2f 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,44 +21,29 @@
#include <linux/debug_locks.h>
#include <linux/module.h>
+#ifndef _spin_trylock
int __lockfunc _spin_trylock(spinlock_t *lock)
{
- preempt_disable();
- if (_raw_spin_trylock(lock)) {
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable();
- return 0;
+ return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);
+#endif
+#ifndef _read_trylock
int __lockfunc _read_trylock(rwlock_t *lock)
{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable();
- return 0;
+ return __read_trylock(lock);
}
EXPORT_SYMBOL(_read_trylock);
+#endif
+#ifndef _write_trylock
int __lockfunc _write_trylock(rwlock_t *lock)
{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable();
- return 0;
+ return __write_trylock(lock);
}
EXPORT_SYMBOL(_write_trylock);
+#endif
/*
* If lockdep is enabled then we use the non-preemption spin-ops
@@ -67,132 +52,101 @@ EXPORT_SYMBOL(_write_trylock);
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#ifndef _read_lock
void __lockfunc _read_lock(rwlock_t *lock)
{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ __read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
+#endif
+#ifndef _spin_lock_irqsave
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-#else
- _raw_spin_lock_flags(lock, &flags);
-#endif
- return flags;
+ return __spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
+#ifndef _spin_lock_irq
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
- local_irq_disable();
- preempt_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ __spin_lock_irq(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
+#endif
+#ifndef _spin_lock_bh
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ __spin_lock_bh(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
+#endif
+#ifndef _read_lock_irqsave
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
+ return __read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
+#ifndef _read_lock_irq
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ __read_lock_irq(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
+#endif
+#ifndef _read_lock_bh
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ __read_lock_bh(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
+#endif
+#ifndef _write_lock_irqsave
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
+ return __write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
+#ifndef _write_lock_irq
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ __write_lock_irq(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
+#endif
+#ifndef _write_lock_bh
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ __write_lock_bh(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
+#endif
+#ifndef _spin_lock
void __lockfunc _spin_lock(spinlock_t *lock)
{
- preempt_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ __spin_lock(lock);
}
-
EXPORT_SYMBOL(_spin_lock);
+#endif
+#ifndef _write_lock
void __lockfunc _write_lock(rwlock_t *lock)
{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ __write_lock(lock);
}
-
EXPORT_SYMBOL(_write_lock);
+#endif
#else /* CONFIG_PREEMPT: */
@@ -318,125 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
#endif
+#ifndef _spin_unlock
void __lockfunc _spin_unlock(spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable();
+ __spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);
+#endif
+#ifndef _write_unlock
void __lockfunc _write_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
+ __write_unlock(lock);
}
EXPORT_SYMBOL(_write_unlock);
+#endif
+#ifndef _read_unlock
void __lockfunc _read_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable();
+ __read_unlock(lock);
}
EXPORT_SYMBOL(_read_unlock);
+#endif
+#ifndef _spin_unlock_irqrestore
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
+ __spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
+#endif
+#ifndef _spin_unlock_irq
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- local_irq_enable();
- preempt_enable();
+ __spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_spin_unlock_irq);
+#endif
+#ifndef _spin_unlock_bh
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_spin_unlock_bh);
+#endif
+#ifndef _read_unlock_irqrestore
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
+ __read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
+#endif
+#ifndef _read_unlock_irq
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
+ __read_unlock_irq(lock);
}
EXPORT_SYMBOL(_read_unlock_irq);
+#endif
+#ifndef _read_unlock_bh
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __read_unlock_bh(lock);
}
EXPORT_SYMBOL(_read_unlock_bh);
+#endif
+#ifndef _write_unlock_irqrestore
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
+ __write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
+#endif
+#ifndef _write_unlock_irq
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
+ __write_unlock_irq(lock);
}
EXPORT_SYMBOL(_write_unlock_irq);
+#endif
+#ifndef _write_unlock_bh
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __write_unlock_bh(lock);
}
EXPORT_SYMBOL(_write_unlock_bh);
+#endif
+#ifndef _spin_trylock_bh
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
- if (_raw_spin_trylock(lock)) {
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
- return 0;
+ return __spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_spin_trylock_bh);
+#endif
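Editor's note: wrapping every out-of-line lock function in #ifndef <name> ... #endif lets an architecture or a later configuration supply its own definition, typically by mapping the symbol straight onto the __spin_lock()-style inline helpers these functions now call, and defining the guard macro so kernel/spinlock.c omits its generic copy. A hedged sketch of such an override; the header location and the decision to inline are hypothetical and not part of this patch:

    /*
     * Hypothetical override, e.g. from a header that is included
     * everywhere before kernel/spinlock.c is compiled. Defining the
     * guard macro makes kernel/spinlock.c skip its generic out-of-line
     * copy, and callers pick up the inline __spin_lock() helpers instead.
     */
    #define _spin_lock(lock)        __spin_lock(lock)
    #define _spin_lock_irq(lock)    __spin_lock_irq(lock)
    #define _spin_unlock(lock)      __spin_unlock(lock)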
notrace int in_lock_functions(unsigned long addr)
{
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 58be76017fd0..71d8dc7f9920 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -49,7 +49,6 @@
#include <linux/acpi.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
-#include <linux/security.h>
#include <linux/slow-work.h>
#include <linux/perf_counter.h>
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0668795d8818..3c44b56b0da7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -600,7 +600,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
* schedule_work - put work task in global workqueue
* @work: job to be done
*
- * This puts a job in the kernel-global workqueue.
+ * Returns zero if @work was already on the kernel-global workqueue and
+ * non-zero otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued; otherwise the job is left at its current position in the
+ * kernel-global workqueue.
*/
int schedule_work(struct work_struct *work)
{
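Editor's note: given the clarified return convention, callers can use the return value to tell whether they actually queued new work or merely found it already pending. An illustrative caller, with a made-up work item and handler:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    static void demo_handler(struct work_struct *work)
    {
            pr_info("demo work ran\n");
    }

    static DECLARE_WORK(demo_work, demo_handler);

    static void kick_demo_work(void)
    {
            /* Non-zero: we queued it.  Zero: it was already pending and stays put. */
            if (!schedule_work(&demo_work))
                    pr_debug("demo work already queued\n");
    }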