summary | refs | log | tree | commit | diff
path: root/vm/vm_map.c
diff options
context:
space:
mode:
Diffstat (limited to 'vm/vm_map.c')
-rw-r--r--  vm/vm_map.c  | 63
1 files changed, 40 insertions, 23 deletions
diff --git a/vm/vm_map.c b/vm/vm_map.c
index f52e7c76..b1c1b4e0 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -222,29 +222,15 @@ vm_map_t vm_map_create(
return(result);
}
-/*
- * vm_map_entry_create: [ internal use only ]
- *
- * Allocates a VM map entry for insertion in the
- * given map (or map copy). No fields are filled.
- */
-#define vm_map_entry_create(map) \
- _vm_map_entry_create(&(map)->hdr)
-
-#define vm_map_copy_entry_create(copy) \
- _vm_map_entry_create(&(copy)->cpy_hdr)
-
-vm_map_entry_t _vm_map_entry_create(map_header)
- const struct vm_map_header *map_header;
+void vm_map_lock(struct vm_map *map)
{
- vm_map_entry_t entry;
- boolean_t vm_privilege;
+ lock_write(&map->lock);
/*
- * XXX Map entry creation may occur while a map is locked,
+ * XXX Memory allocation may occur while a map is locked,
* for example when clipping entries. If the system is running
- * low on memory, allocating an entry may block until pages
- * are available. But if a map used by the default pager is
+ * low on memory, allocating may block until pages are
+ * available. But if a map used by the default pager is
* kept locked, a deadlock occurs.
*
* This workaround temporarily elevates the current thread
@@ -252,19 +238,50 @@ vm_map_entry_t _vm_map_entry_create(map_header)
* so regardless of the map for convenience, and because it's
* currently impossible to predict which map the default pager
* may depend on.
+ *
+ * This workaround isn't reliable, and only makes exhaustion
+ * less likely. In particular pageout may cause lots of data
+ * to be passed between the kernel and the pagers, often
+ * in the form of large copy maps. Making the minimum
+ * number of pages depend on the total number of pages
+ * should make exhaustion even less likely.
*/
if (current_thread()) {
- vm_privilege = current_thread()->vm_privilege;
- current_thread()->vm_privilege = TRUE;
+ current_thread()->vm_privilege++;
+ assert(current_thread()->vm_privilege != 0);
}
- entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
+ map->timestamp++;
+}
+void vm_map_unlock(struct vm_map *map)
+{
if (current_thread()) {
- current_thread()->vm_privilege = vm_privilege;
+ current_thread()->vm_privilege--;
}
+ lock_write_done(&map->lock);
+}
+
+/*
+ * vm_map_entry_create: [ internal use only ]
+ *
+ * Allocates a VM map entry for insertion in the
+ * given map (or map copy). No fields are filled.
+ */
+#define vm_map_entry_create(map) \
+ _vm_map_entry_create(&(map)->hdr)
+
+#define vm_map_copy_entry_create(copy) \
+ _vm_map_entry_create(&(copy)->cpy_hdr)
+
+vm_map_entry_t _vm_map_entry_create(map_header)
+ const struct vm_map_header *map_header;
+{
+ vm_map_entry_t entry;
+
+ entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");