
TOMOYO: Fix lockdep warning.

Currently TOMOYO holds the SRCU lock upon open() and releases it upon close(),
because list elements stored in the "struct tomoyo_io_buffer" instances are
accessed until close() is called. However, such SRCU usage makes lockdep
complain about leaving the kernel with the SRCU lock still held.
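
Concretely, the pre-patch flow looked roughly like the sketch below: the SRCU
read-side critical section is opened in open() and not closed until close(), so
the task returns to user space in between with the lock still held, which is
what lockdep flags. This is a simplified illustration only (error handling,
buffer setup and the TOMOYO_QUERY/TOMOYO_AUDIT special cases are omitted; it
assumes the pre-patch declarations from security/tomoyo/common.h, where
tomoyo_read_lock()/tomoyo_read_unlock() wrap srcu_read_lock()/srcu_read_unlock()
on tomoyo_ss):

/* Before this patch (simplified): SRCU is held across system calls. */
int tomoyo_open_control(const u8 type, struct file *file)
{
        struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS);

        if (!head)
                return -ENOMEM;
        head->reader_idx = tomoyo_read_lock();  /* srcu_read_lock(&tomoyo_ss) */
        file->private_data = head;
        return 0;                       /* back to user space, lock still held */
}

int tomoyo_close_control(struct tomoyo_io_buffer *head)
{
        tomoyo_read_unlock(head->reader_idx);   /* srcu_read_unlock(&tomoyo_ss, idx) */
        kfree(head);
        return 0;
}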

This patch solves the warning by holding/releasing SRCU around each
read()/write() call instead. In effect this is similar to calling kfree()
without calling synchronize_srcu(): list elements may still be referenced from
a "struct tomoyo_io_buffer" after the SRCU lock has been dropped, so kfree() is
selectively deferred by keeping track of the live "struct tomoyo_io_buffer"
instances.
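
Under the new scheme each read()/write() handler brackets only its own list
walk with SRCU; a condensed sketch of the patched tomoyo_read_control() shown
in the diff below (buffer flushing and the multi-namespace loop are elided):

int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
                        const int buffer_len)
{
        int len;
        int idx;

        if (!head->read)
                return -ENOSYS;
        if (mutex_lock_interruptible(&head->io_sem))
                return -EINTR;
        head->read_user_buf = buffer;
        head->read_user_buf_avail = buffer_len;
        idx = tomoyo_read_lock();       /* srcu_read_lock(&tomoyo_ss) */
        head->read(head);               /* walk the SRCU-protected policy lists */
        tomoyo_read_unlock(idx);        /* srcu_read_unlock(&tomoyo_ss, idx) */
        len = head->read_user_buf - buffer;
        mutex_unlock(&head->io_sem);
        return len;
}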

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: James Morris <jmorris@namei.org>
Authored by Tetsuo Handa on 2011-06-26 23:20:55 +09:00, committed by James Morris
parent 5625f2e326
commit 2e503bbb43
3 changed files with 276 additions and 51 deletions

security/tomoyo/common.c

@@ -1820,9 +1820,7 @@ static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
* @type: Type of interface.
* @file: Pointer to "struct file".
*
* Associates policy handler and returns 0 on success, -ENOMEM otherwise.
*
* Caller acquires tomoyo_read_lock().
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_open_control(const u8 type, struct file *file)
{
@@ -1921,9 +1919,6 @@ int tomoyo_open_control(const u8 type, struct file *file)
return -ENOMEM;
}
}
if (type != TOMOYO_QUERY && type != TOMOYO_AUDIT)
head->reader_idx = tomoyo_read_lock();
file->private_data = head;
/*
* If the file is /sys/kernel/security/tomoyo/query , increment the
* observer counter.
@@ -1932,6 +1927,8 @@ int tomoyo_open_control(const u8 type, struct file *file)
*/
if (type == TOMOYO_QUERY)
atomic_inc(&tomoyo_query_observers);
file->private_data = head;
tomoyo_notify_gc(head, true);
return 0;
}
@@ -2000,13 +1997,12 @@ static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head)
* @buffer_len: Size of @buffer.
*
* Returns bytes read on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
const int buffer_len)
{
int len;
int idx;
if (!head->read)
return -ENOSYS;
@@ -2014,6 +2010,7 @@ int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
return -EINTR;
head->read_user_buf = buffer;
head->read_user_buf_avail = buffer_len;
idx = tomoyo_read_lock();
if (tomoyo_flush(head))
/* Call the policy handler. */
do {
@@ -2021,6 +2018,7 @@ int tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
head->read(head);
} while (tomoyo_flush(head) &&
tomoyo_has_more_namespace(head));
tomoyo_read_unlock(idx);
len = head->read_user_buf - buffer;
mutex_unlock(&head->io_sem);
return len;
@@ -2071,8 +2069,6 @@ static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line)
* @buffer_len: Size of @buffer.
*
* Returns @buffer_len on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
int tomoyo_write_control(struct tomoyo_io_buffer *head,
const char __user *buffer, const int buffer_len)
@@ -2080,12 +2076,14 @@ int tomoyo_write_control(struct tomoyo_io_buffer *head,
int error = buffer_len;
size_t avail_len = buffer_len;
char *cp0 = head->write_buf;
int idx;
if (!head->write)
return -ENOSYS;
if (!access_ok(VERIFY_READ, buffer, buffer_len))
return -EFAULT;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
idx = tomoyo_read_lock();
/* Read a line and dispatch it to the policy handler. */
while (avail_len > 0) {
char c;
@@ -2148,6 +2146,7 @@ int tomoyo_write_control(struct tomoyo_io_buffer *head,
}
}
out:
tomoyo_read_unlock(idx);
mutex_unlock(&head->io_sem);
return error;
}
@@ -2157,30 +2156,18 @@ out:
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Releases memory and returns 0.
*
* Caller looses tomoyo_read_lock().
* Returns 0.
*/
int tomoyo_close_control(struct tomoyo_io_buffer *head)
{
const bool is_write = !!head->write_buf;
/*
* If the file is /sys/kernel/security/tomoyo/query , decrement the
* observer counter.
*/
if (head->type == TOMOYO_QUERY)
atomic_dec(&tomoyo_query_observers);
else if (head->type != TOMOYO_AUDIT)
tomoyo_read_unlock(head->reader_idx);
/* Release memory used for policy I/O. */
kfree(head->read_buf);
head->read_buf = NULL;
kfree(head->write_buf);
head->write_buf = NULL;
kfree(head);
if (is_write)
tomoyo_run_gc();
if (head->type == TOMOYO_QUERY &&
atomic_dec_and_test(&tomoyo_query_observers))
wake_up_all(&tomoyo_answer_wait);
tomoyo_notify_gc(head, false);
return 0;
}

security/tomoyo/common.h

@@ -441,8 +441,6 @@ struct tomoyo_io_buffer {
int (*poll) (struct file *file, poll_table *wait);
/* Exclusive lock for this structure. */
struct mutex io_sem;
/* Index returned by tomoyo_read_lock(). */
int reader_idx;
char __user *read_user_buf;
int read_user_buf_avail;
struct {
@@ -480,6 +478,10 @@ struct tomoyo_io_buffer {
int writebuf_size;
/* Type of this interface. */
u8 type;
/* Users counter protected by tomoyo_io_buffer_list_lock. */
u8 users;
/* List for telling GC not to kfree() elements. */
struct list_head list;
};
/*
@@ -651,7 +653,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm);
void tomoyo_print_ulong(char *buffer, const int buffer_len,
const unsigned long value, const u8 type);
void tomoyo_put_name_union(struct tomoyo_name_union *ptr);
void tomoyo_run_gc(void);
void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register);
void tomoyo_memory_free(void *ptr);
int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
struct tomoyo_acl_param *param,
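
The two new fields added to "struct tomoyo_io_buffer" above are what make the
deferral safe: users pins a buffer while a GC helper sleeps on head->io_sem,
and list links every open buffer onto tomoyo_io_buffer_list (both defined in
gc.c below). A simplified sketch of the pin/inspect/unpin pattern the gc.c
helpers follow (the helper name is hypothetical; the full versions appear in
the next file):

static bool example_scan_io_buffers(void)
{
        struct tomoyo_io_buffer *head;
        bool in_use = false;

        spin_lock(&tomoyo_io_buffer_list_lock);
        list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
                head->users++;          /* pin: GC must not kfree(head) */
                spin_unlock(&tomoyo_io_buffer_list_lock);
                if (mutex_lock_interruptible(&head->io_sem)) {
                        in_use = true;  /* interrupted: assume busy */
                } else {
                        /* inspect head->r / head->w under io_sem here */
                        mutex_unlock(&head->io_sem);
                }
                spin_lock(&tomoyo_io_buffer_list_lock);
                head->users--;          /* unpin */
                if (in_use)
                        break;
        }
        spin_unlock(&tomoyo_io_buffer_list_lock);
        return in_use;
}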

security/tomoyo/gc.c

@@ -11,13 +11,123 @@
#include <linux/kthread.h>
#include <linux/slab.h>
/* The list for "struct tomoyo_io_buffer". */
static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
/* Size of an element. */
static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = {
[TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group),
[TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group),
[TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group),
[TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator),
[TOMOYO_ID_TRANSITION_CONTROL] =
sizeof(struct tomoyo_transition_control),
[TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager),
/* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */
/* [TOMOYO_ID_ACL] =
tomoyo_acl_size["struct tomoyo_acl_info"->type], */
[TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info),
};
/* Size of a domain ACL element. */
static const u8 tomoyo_acl_size[] = {
[TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl),
[TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl),
[TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl),
[TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl),
[TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl),
};
/**
* tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
*
* @element: Pointer to "struct list_head".
*
* Returns true if @element is used by /sys/kernel/security/tomoyo/ users,
* false otherwise.
*/
static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
{
struct tomoyo_io_buffer *head;
bool in_use = false;
spin_lock(&tomoyo_io_buffer_list_lock);
list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
if (mutex_lock_interruptible(&head->io_sem)) {
in_use = true;
goto out;
}
if (head->r.domain == element || head->r.group == element ||
head->r.acl == element || &head->w.domain->list == element)
in_use = true;
mutex_unlock(&head->io_sem);
out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
break;
}
spin_unlock(&tomoyo_io_buffer_list_lock);
return in_use;
}
/**
* tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
*
* @string: String to check.
* @size: Memory allocated for @string .
*
* Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
* false otherwise.
*/
static bool tomoyo_name_used_by_io_buffer(const char *string,
const size_t size)
{
struct tomoyo_io_buffer *head;
bool in_use = false;
spin_lock(&tomoyo_io_buffer_list_lock);
list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
int i;
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
if (mutex_lock_interruptible(&head->io_sem)) {
in_use = true;
goto out;
}
for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
const char *w = head->r.w[i];
if (w < string || w > string + size)
continue;
in_use = true;
break;
}
mutex_unlock(&head->io_sem);
out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
break;
}
spin_unlock(&tomoyo_io_buffer_list_lock);
return in_use;
}
/* Structure for garbage collection. */
struct tomoyo_gc {
struct list_head list;
enum tomoyo_policy_id type;
size_t size;
struct list_head *element;
};
static LIST_HEAD(tomoyo_gc_queue);
static DEFINE_MUTEX(tomoyo_gc_mutex);
/* List of entries to be deleted. */
static LIST_HEAD(tomoyo_gc_list);
/* Length of tomoyo_gc_list. */
static int tomoyo_gc_list_len;
/**
* tomoyo_add_to_gc - Add an entry to to be deleted list.
@@ -43,10 +153,42 @@ static bool tomoyo_add_to_gc(const int type, struct list_head *element)
if (!entry)
return false;
entry->type = type;
if (type == TOMOYO_ID_ACL)
entry->size = tomoyo_acl_size[
container_of(element,
typeof(struct tomoyo_acl_info),
list)->type];
else if (type == TOMOYO_ID_NAME)
entry->size = strlen(container_of(element,
typeof(struct tomoyo_name),
head.list)->entry.name) + 1;
else
entry->size = tomoyo_element_size[type];
entry->element = element;
list_add(&entry->list, &tomoyo_gc_queue);
list_add(&entry->list, &tomoyo_gc_list);
list_del_rcu(element);
return true;
return tomoyo_gc_list_len++ < 128;
}
/**
* tomoyo_element_linked_by_gc - Validate next element of an entry.
*
* @element: Pointer to an element.
* @size: Size of @element in byte.
*
* Returns true if @element is linked by other elements in the garbage
* collector's queue, false otherwise.
*/
static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
{
struct tomoyo_gc *p;
list_for_each_entry(p, &tomoyo_gc_list, list) {
const u8 *ptr = (const u8 *) p->element->next;
if (ptr < element || element + size < ptr)
continue;
return true;
}
return false;
}
/**
@@ -151,6 +293,13 @@ static void tomoyo_del_acl(struct list_head *element)
}
}
/**
* tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
*
* @element: Pointer to "struct list_head".
*
* Returns true if deleted, false otherwise.
*/
static bool tomoyo_del_domain(struct list_head *element)
{
struct tomoyo_domain_info *domain =
@@ -360,13 +509,44 @@ unlock:
mutex_unlock(&tomoyo_policy_lock);
}
static void tomoyo_kfree_entry(void)
/**
* tomoyo_kfree_entry - Delete entries in tomoyo_gc_list.
*
* Returns true if some entries were kfree()d, false otherwise.
*/
static bool tomoyo_kfree_entry(void)
{
struct tomoyo_gc *p;
struct tomoyo_gc *tmp;
bool result = false;
list_for_each_entry_safe(p, tmp, &tomoyo_gc_queue, list) {
list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) {
struct list_head *element = p->element;
/*
* list_del_rcu() in tomoyo_add_to_gc() guarantees that the
* list element became no longer reachable from the list which
* the element was originally on (e.g. tomoyo_domain_list).
* Also, synchronize_srcu() in tomoyo_gc_thread() guarantees
* that the list element became no longer referenced by syscall
* users.
*
* However, there are three users which may still be using the
* list element. We need to defer until all of these users
* forget the list element.
*
* Firstly, defer until "struct tomoyo_io_buffer"->r.{domain,
* group,acl} and "struct tomoyo_io_buffer"->w.domain forget
* the list element.
*/
if (tomoyo_struct_used_by_io_buffer(element))
continue;
/*
* Secondly, defer until all other elements in the
* tomoyo_gc_list list forget the list element.
*/
if (tomoyo_element_linked_by_gc((const u8 *) element, p->size))
continue;
switch (p->type) {
case TOMOYO_ID_TRANSITION_CONTROL:
tomoyo_del_transition_control(element);
@@ -378,6 +558,14 @@ static void tomoyo_kfree_entry(void)
tomoyo_del_manager(element);
break;
case TOMOYO_ID_NAME:
/*
* Thirdly, defer until all "struct tomoyo_io_buffer"
* ->r.w[] forget the list element.
*/
if (tomoyo_name_used_by_io_buffer(
container_of(element, typeof(struct tomoyo_name),
head.list)->entry.name, p->size))
continue;
tomoyo_del_name(element);
break;
case TOMOYO_ID_ACL:
@@ -402,7 +590,10 @@ static void tomoyo_kfree_entry(void)
tomoyo_memory_free(element);
list_del(&p->list);
kfree(p);
tomoyo_gc_list_len--;
result = true;
}
return result;
}
/**
@@ -418,25 +609,70 @@ static void tomoyo_kfree_entry(void)
*/
static int tomoyo_gc_thread(void *unused)
{
/* Garbage collector thread is exclusive. */
static DEFINE_MUTEX(tomoyo_gc_mutex);
if (!mutex_trylock(&tomoyo_gc_mutex))
goto out;
daemonize("GC for TOMOYO");
if (mutex_trylock(&tomoyo_gc_mutex)) {
int i;
for (i = 0; i < 10; i++) {
tomoyo_collect_entry();
if (list_empty(&tomoyo_gc_queue))
break;
synchronize_srcu(&tomoyo_ss);
tomoyo_kfree_entry();
do {
tomoyo_collect_entry();
if (list_empty(&tomoyo_gc_list))
break;
synchronize_srcu(&tomoyo_ss);
} while (tomoyo_kfree_entry());
{
struct tomoyo_io_buffer *head;
struct tomoyo_io_buffer *tmp;
spin_lock(&tomoyo_io_buffer_list_lock);
list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list,
list) {
if (head->users)
continue;
list_del(&head->list);
kfree(head->read_buf);
kfree(head->write_buf);
kfree(head);
}
mutex_unlock(&tomoyo_gc_mutex);
spin_unlock(&tomoyo_io_buffer_list_lock);
}
do_exit(0);
mutex_unlock(&tomoyo_gc_mutex);
out:
/* This acts as do_exit(0). */
return 0;
}
void tomoyo_run_gc(void)
/**
* tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @is_register: True if register, false if unregister.
*
* Returns nothing.
*/
void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register)
{
struct task_struct *task = kthread_create(tomoyo_gc_thread, NULL,
"GC for TOMOYO");
if (!IS_ERR(task))
wake_up_process(task);
bool is_write = false;
spin_lock(&tomoyo_io_buffer_list_lock);
if (is_register) {
head->users = 1;
list_add(&head->list, &tomoyo_io_buffer_list);
} else {
is_write = head->write_buf != NULL;
if (!--head->users) {
list_del(&head->list);
kfree(head->read_buf);
kfree(head->write_buf);
kfree(head);
}
}
spin_unlock(&tomoyo_io_buffer_list_lock);
if (is_write) {
struct task_struct *task = kthread_create(tomoyo_gc_thread,
NULL,
"GC for TOMOYO");
if (!IS_ERR(task))
wake_up_process(task);
}
}
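
For completeness, a hypothetical caller of the new interface would look like
the sketch below (the real callers are tomoyo_open_control() and
tomoyo_close_control() in common.c above): open() and release() no longer touch
SRCU at all, they only register and unregister the buffer with the GC
bookkeeping added in this file.

#include <linux/fs.h>
#include <linux/slab.h>
#include "common.h"     /* struct tomoyo_io_buffer, tomoyo_notify_gc() */

static int example_open(struct inode *inode, struct file *file)
{
        struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS);

        if (!head)
                return -ENOMEM;
        mutex_init(&head->io_sem);
        file->private_data = head;
        tomoyo_notify_gc(head, true);   /* users = 1, linked on tomoyo_io_buffer_list */
        return 0;
}

static int example_release(struct inode *inode, struct file *file)
{
        /* Drops the last reference: the buffer is freed here if the GC is not
           inspecting it, otherwise by the GC thread once users returns to 0. */
        tomoyo_notify_gc(file->private_data, false);
        return 0;
}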