-rw-r--r--  device/ds_routines.c   2
-rw-r--r--  kern/thread.c          6
-rw-r--r--  kern/thread.h          5
-rw-r--r--  vm/vm_map.c           63
-rw-r--r--  vm/vm_map.h            8
-rw-r--r--  vm/vm_pageout.c        2
6 files changed, 49 insertions, 37 deletions
diff --git a/device/ds_routines.c b/device/ds_routines.c
index 445e7ae1..1fabec3c 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -1512,7 +1512,7 @@ void io_done_thread(void)
/*
* Set thread privileges and highest priority.
*/
- current_thread()->vm_privilege = TRUE;
+ current_thread()->vm_privilege = 1;
stack_privilege(current_thread());
thread_set_own_priority(0);
diff --git a/kern/thread.c b/kern/thread.c
index 7db1f3d2..0ac7c535 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -342,7 +342,7 @@ void thread_init(void)
/* thread_template.sched_stamp (later) */
thread_template.recover = (vm_offset_t) 0;
- thread_template.vm_privilege = FALSE;
+ thread_template.vm_privilege = 0;
thread_template.user_stop_count = 1;
@@ -2233,11 +2233,11 @@ thread_wire(
thread_lock(thread);
if (wired) {
- thread->vm_privilege = TRUE;
+ thread->vm_privilege = 1;
stack_privilege(thread);
}
else {
- thread->vm_privilege = FALSE;
+ thread->vm_privilege = 0;
/*XXX stack_unprivilege(thread); */
thread->stack_privilege = 0;
}
diff --git a/kern/thread.h b/kern/thread.h
index 7106fd2d..f0ed71a8 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -77,7 +77,6 @@ struct thread {
struct {
unsigned state:16;
unsigned wake_active:1;
- unsigned vm_privilege:1;
unsigned active:1;
};
event_t event_key;
@@ -146,8 +145,8 @@ struct thread {
/* VM global variables */
vm_offset_t recover; /* page fault recovery (copyin/out) */
- /* Defined above */
- /* boolean_t vm_privilege; Can use reserved memory? */
+ unsigned int vm_privilege; /* Can use reserved memory?
+ Implemented as a counter */
/* User-visible scheduling state */
int user_stop_count; /* outstanding stops */
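
Why a counter instead of restoring the old bit: privilege elevation now spans from vm_map_lock() to vm_map_unlock(), and a thread may take a map lock while already holding another, so a boolean with save-and-restore (which worked while elevation stayed inside a single function) cannot pair up correctly across separate calls. A minimal standalone sketch of the difference, with hypothetical names not taken from the tree:

#include <assert.h>
#include <stdio.h>

struct thread_sketch {
        unsigned int vm_privilege;      /* elevation depth, 0 = unprivileged */
};

static void sketch_lock(struct thread_sketch *t)
{
        t->vm_privilege++;
        assert(t->vm_privilege != 0);   /* wraparound check, as in the patch */
}

static void sketch_unlock(struct thread_sketch *t)
{
        assert(t->vm_privilege > 0);
        t->vm_privilege--;
}

int main(void)
{
        struct thread_sketch t = { 0 };

        sketch_lock(&t);        /* lock map A */
        sketch_lock(&t);        /* lock map B while A is held */
        sketch_unlock(&t);      /* unlock B: depth 1, still privileged */
        printf("after inner unlock: %u\n", t.vm_privilege);
        sketch_unlock(&t);      /* unlock A: depth 0, privilege dropped */
        printf("after outer unlock: %u\n", t.vm_privilege);
        return 0;
}

A boolean in the same sequence would have been cleared by the inner unlock while map A was still locked. The counter also composes with the permanently privileged threads touched elsewhere in this patch: io_done_thread, vm_pageout and wired threads start the counter at 1, so lock/unlock pairs move them between 1 and 2 and never strip their privilege.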
diff --git a/vm/vm_map.c b/vm/vm_map.c
index f52e7c76..b1c1b4e0 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -222,29 +222,15 @@ vm_map_t vm_map_create(
return(result);
}
-/*
- * vm_map_entry_create: [ internal use only ]
- *
- * Allocates a VM map entry for insertion in the
- * given map (or map copy). No fields are filled.
- */
-#define vm_map_entry_create(map) \
- _vm_map_entry_create(&(map)->hdr)
-
-#define vm_map_copy_entry_create(copy) \
- _vm_map_entry_create(&(copy)->cpy_hdr)
-
-vm_map_entry_t _vm_map_entry_create(map_header)
- const struct vm_map_header *map_header;
+void vm_map_lock(struct vm_map *map)
{
- vm_map_entry_t entry;
- boolean_t vm_privilege;
+ lock_write(&map->lock);
/*
- * XXX Map entry creation may occur while a map is locked,
+ * XXX Memory allocation may occur while a map is locked,
* for example when clipping entries. If the system is running
- * low on memory, allocating an entry may block until pages
- * are available. But if a map used by the default pager is
+ * low on memory, allocating may block until pages are
+ * available. But if a map used by the default pager is
* kept locked, a deadlock occurs.
*
* This workaround temporarily elevates the current thread
@@ -252,19 +238,50 @@ vm_map_entry_t _vm_map_entry_create(map_header)
* so regardless of the map for convenience, and because it's
* currently impossible to predict which map the default pager
* may depend on.
+ *
+ * This workaround isn't reliable, and only makes exhaustion
+ * less likely. In particular pageout may cause lots of data
+ * to be passed between the kernel and the pagers, often
+ * in the form of large copy maps. Making the minimum
+ * number of pages depend on the total number of pages
+ * should make exhaustion even less likely.
*/
if (current_thread()) {
- vm_privilege = current_thread()->vm_privilege;
- current_thread()->vm_privilege = TRUE;
+ current_thread()->vm_privilege++;
+ assert(current_thread()->vm_privilege != 0);
}
- entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
+ map->timestamp++;
+}
+void vm_map_unlock(struct vm_map *map)
+{
if (current_thread()) {
- current_thread()->vm_privilege = vm_privilege;
+ current_thread()->vm_privilege--;
}
+ lock_write_done(&map->lock);
+}
+
+/*
+ * vm_map_entry_create: [ internal use only ]
+ *
+ * Allocates a VM map entry for insertion in the
+ * given map (or map copy). No fields are filled.
+ */
+#define vm_map_entry_create(map) \
+ _vm_map_entry_create(&(map)->hdr)
+
+#define vm_map_copy_entry_create(copy) \
+ _vm_map_entry_create(&(copy)->cpy_hdr)
+
+vm_map_entry_t _vm_map_entry_create(map_header)
+ const struct vm_map_header *map_header;
+{
+ vm_map_entry_t entry;
+
+ entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
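
The counter only takes effect at allocation time: a nonzero vm_privilege is what lets a thread dip below the free-page reserve instead of sleeping until pageout replenishes it, and that sleep is precisely what deadlocks when the sleeping thread holds a map the default pager needs. A simplified sketch of that consumer side; page_alloc_allowed is a hypothetical helper, not the tree's page-grab code, and only the shape of the test is taken from Mach:

#include <stdio.h>

/* Hypothetical helper: may this thread allocate a page right now?
 * free_count and reserved stand in for the VM system's free-page
 * counters. */
static int page_alloc_allowed(unsigned long free_count,
                              unsigned long reserved,
                              unsigned int vm_privilege)
{
        if (free_count > reserved)
                return 1;               /* above the reserve: anyone */
        return vm_privilege != 0;       /* below it: privileged only */
}

int main(void)
{
        /* Below the reserve, only a privileged thread gets through. */
        printf("unprivileged: %d\n", page_alloc_allowed(10, 50, 0));
        printf("privileged:   %d\n", page_alloc_allowed(10, 50, 1));
        return 0;
}

With vm_map_lock() raising the counter, allocations made while a map is write-locked (clipping entries, for instance) pass this test rather than blocking behind the very pageout that may need the locked map.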
diff --git a/vm/vm_map.h b/vm/vm_map.h
index dad07139..537c36ef 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -352,13 +352,9 @@ MACRO_BEGIN \
(map)->timestamp = 0; \
MACRO_END
-#define vm_map_lock(map) \
-MACRO_BEGIN \
- lock_write(&(map)->lock); \
- (map)->timestamp++; \
-MACRO_END
+void vm_map_lock(struct vm_map *map);
+void vm_map_unlock(struct vm_map *map);
-#define vm_map_unlock(map) lock_write_done(&(map)->lock)
#define vm_map_lock_read(map) lock_read(&(map)->lock)
#define vm_map_unlock_read(map) lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map) \
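
Turning the lock macros into functions is what keeps this bookkeeping contained: as a macro, the privilege increment would expand current_thread() and assert() into every file that includes vm/vm_map.h, whereas one out-of-line definition in vm_map.c leaves the header with two prototypes. A self-contained sketch of the resulting shape, with stub types standing in for the kernel's (all names here are illustrative):

#include <assert.h>

struct lock { int write_held; };
struct vm_map_sketch { struct lock lock; unsigned int timestamp; };
struct thread_sketch { unsigned int vm_privilege; };

/* Stubs standing in for kern/thread.h and the lock package. */
static struct thread_sketch self;
static struct thread_sketch *cur_thread(void) { return &self; }
static void lock_write(struct lock *l)        { l->write_held = 1; }
static void lock_write_done(struct lock *l)   { l->write_held = 0; }

/* One out-of-line definition carries all the bookkeeping; callers
 * keep the exact call syntax the old macros had. */
void sketch_map_lock(struct vm_map_sketch *map)
{
        lock_write(&map->lock);
        cur_thread()->vm_privilege++;
        assert(cur_thread()->vm_privilege != 0);
        map->timestamp++;               /* version bump kept from the macro */
}

void sketch_map_unlock(struct vm_map_sketch *map)
{
        cur_thread()->vm_privilege--;
        lock_write_done(&map->lock);
}

int main(void)
{
        struct vm_map_sketch map = { { 0 }, 0 };

        sketch_map_lock(&map);
        sketch_map_unlock(&map);
        return (int) map.timestamp;     /* 1 after one lock/unlock cycle */
}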
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index f420804b..e0bd9880 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -918,7 +918,7 @@ void vm_pageout(void)
{
unsigned long free_after_reserve;
- current_thread()->vm_privilege = TRUE;
+ current_thread()->vm_privilege = 1;
stack_privilege(current_thread());
thread_set_own_priority(0);