:Matt,
:
: So far so good. It boots up, and it stayed up for about 30
:minutes while I did a "make -j4 buildworld". Unfortunately, the CPU
:temperature rose above 60 degrees Celsius, and the machine shut down :-)
:I'll give it another go later, making sure it can stay up for at least
:24 hours :-)
:
:Adam
The config looks fine. I did some testing and found some issues,
but it's still weird that it would crash at boot rather than later on.
In any case, please try this patch with MAX_MAPENT returned to
its original value and tell me if it works. I rewrote the code so
the vm_map_entry_t reserve is only used when allocating memory for more
vm_map_entry_t structures.
-Matt
Matthew Dillon
<dillon@backplane.com>
Index: sys/globaldata.h
===================================================================
RCS file: /cvs/src/sys/sys/globaldata.h,v
retrieving revision 1.32
diff -u -r1.32 globaldata.h
--- sys/globaldata.h 16 Jul 2004 05:51:57 -0000 1.32
+++ sys/globaldata.h 25 Oct 2004 08:40:30 -0000
@@ -143,7 +143,7 @@
struct thread gd_schedthread; /* userland scheduler helper */
struct thread gd_idlethread;
SLGlobalData gd_slab; /* slab allocator */
- int gd_vme_kdeficit; /* vm_map_entry reservation */
+ int gd_unused02;
int gd_vme_avail; /* vm_map_entry reservation */
struct vm_map_entry *gd_vme_base; /* vm_map_entry reservation */
struct systimerq gd_systimerq; /* per-cpu system timers */
Index: vm/vm_map.c
===================================================================
RCS file: /cvs/src/sys/vm/vm_map.c,v
retrieving revision 1.33
diff -u -r1.33 vm_map.c
--- vm/vm_map.c 12 Oct 2004 19:21:16 -0000 1.33
+++ vm/vm_map.c 25 Oct 2004 09:00:11 -0000
@@ -185,7 +185,8 @@
void
vm_init2(void)
{
- zinitna(mapentzone, &mapentobj, NULL, 0, 0, ZONE_USE_RESERVE, 1);
+ zinitna(mapentzone, &mapentobj, NULL, 0, 0,
+ ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
pmap_init2();
@@ -329,14 +330,15 @@
}
/*
- * vm_map_entry_cpu_init:
+ * vm_map_entry_reserve_cpu_init:
*
* Set an initial negative count so the first attempt to reserve
* space preloads a bunch of vm_map_entry's for this cpu. This
* routine is called in early boot so we cannot just call
* vm_map_entry_reserve().
*
- * May be called for a gd other then mycpu.
+ * May be called for a gd other then mycpu, but may only be called
+ * during early boot.
*/
void
vm_map_entry_reserve_cpu_init(globaldata_t gd)
@@ -404,14 +406,18 @@
/*
* vm_map_entry_kreserve:
*
- * Reserve map entry structures for use in kernel_map or (if it exists)
- * kmem_map. These entries have *ALREADY* been reserved on a per-cpu
- * basis when the map was inited. This function is used by zalloc()
- * to avoid a recursion when zalloc() itself needs to allocate additional
- * kernel memory.
- *
- * This function should only be used when the caller intends to later
- * call vm_map_entry_reserve() to 'normalize' the reserve cache.
+ * Reserve map entry structures for use in kernel_map itself. These
+ * entries have *ALREADY* been reserved on a per-cpu basis when the map
+ * was inited. This function is used by zalloc() to avoid a recursion
+ * when zalloc() itself needs to allocate additional kernel memory.
+ *
+ * This function works like the normal reserve but does not load the
+ * vm_map_entry cache (because that would result in an infinite
+ * recursion). Note that gd_vme_avail may go negative. This is expected.
+ *
+ * Any caller of this function must be sure to renormalize after
+ * potentially eating entries to ensure that the reserve supply
+ * remains intact.
*/
int
vm_map_entry_kreserve(int count)
@@ -419,7 +425,7 @@
struct globaldata *gd = mycpu;
crit_enter();
- gd->gd_vme_kdeficit += count;
+ gd->gd_vme_avail -= count;
crit_exit();
KKASSERT(gd->gd_vme_base != NULL);
return(count);
@@ -428,14 +434,9 @@
/*
* vm_map_entry_krelease:
*
- * Release previously reserved map entries for kernel_map or kmem_map
- * use. This routine determines how many entries were actually used and
- * replentishes the kernel reserve supply from vme_avail.
- *
- * If there is insufficient supply vme_avail will go negative, which is
- * ok. We cannot safely call zalloc in this function without getting
- * into a recursion deadlock. zalloc() will call vm_map_entry_reserve()
- * to regenerate the lost entries.
+ * Release previously reserved map entries for kernel_map. We do not
+ * attempt to clean up like the normal release function as this would
+ * cause an unnecessary (but probably not fatal) deep procedure call.
*/
void
vm_map_entry_krelease(int count)
@@ -443,9 +444,7 @@
struct globaldata *gd = mycpu;
crit_enter();
- gd->gd_vme_kdeficit -= count;
- gd->gd_vme_avail -= gd->gd_vme_kdeficit; /* can go negative */
- gd->gd_vme_kdeficit = 0;
+ gd->gd_vme_avail += count;
crit_exit();
}
Index: vm/vm_zone.c
===================================================================
RCS file: /cvs/src/sys/vm/vm_zone.c,v
retrieving revision 1.16
diff -u -r1.16 vm_zone.c
--- vm/vm_zone.c 18 Sep 2004 22:00:37 -0000 1.16
+++ vm/vm_zone.c 25 Oct 2004 09:00:19 -0000
@@ -308,6 +308,10 @@
panic("zget: null zone");
if (z->zflags & ZONE_INTERRUPT) {
+ /*
+ * Interrupt zones do not mess with the kernel_map, they
+ * simply populate an existing mapping.
+ */
nbytes = z->zpagecount * PAGE_SIZE;
nbytes -= nbytes % z->zsize;
item = (char *) z->zkva + nbytes;
@@ -329,16 +333,37 @@
vmstats.v_wire_count++;
}
nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
- } else {
+ } else if (z->zflags & ZONE_SPECIAL) {
+ /*
+ * The special zone is the one used for vm_map_entry_t's.
+ * We have to avoid an infinite recursion in
+ * vm_map_entry_reserve() by using vm_map_entry_kreserve()
+ * instead. The map entries are pre-reserved by the kernel
+ * by vm_map_entry_reserve_cpu_init().
+ */
nbytes = z->zalloc * PAGE_SIZE;
- {
- item = (void *)kmem_alloc3(kernel_map, nbytes, KM_KRESERVE);
- /* note: z might be modified due to blocking */
- if (item != NULL)
- zone_kern_pages += z->zalloc;
+ item = (void *)kmem_alloc3(kernel_map, nbytes, KM_KRESERVE);
+
+ /* note: z might be modified due to blocking */
+ if (item != NULL) {
+ zone_kern_pages += z->zalloc;
+ bzero(item, nbytes);
+ } else {
+ nbytes = 0;
}
+ nitems = nbytes / z->zsize;
+ } else {
+ /*
+ * Otherwise allocate KVA from the kernel_map.
+ */
+ nbytes = z->zalloc * PAGE_SIZE;
+
+ item = (void *)kmem_alloc3(kernel_map, nbytes, 0);
+
+ /* note: z might be modified due to blocking */
if (item != NULL) {
+ zone_kern_pages += z->zalloc;
bzero(item, nbytes);
} else {
nbytes = 0;
@@ -377,10 +402,12 @@
}
/*
- * Recover any reserve missing due to a zalloc/kreserve/krelease
- * recursion.
+ * A special zone may have used a kernel-reserved vm_map_entry. If
+ * so we have to be sure to recover our reserve so we don't run out.
+ * We will panic if we run out.
*/
- vm_map_entry_reserve(0);
+ if (z->zflags & ZONE_SPECIAL)
+ vm_map_entry_reserve(0);
return item;
}
Index: vm/vm_zone.h
===================================================================
RCS file: /cvs/src/sys/vm/vm_zone.h,v
retrieving revision 1.5
diff -u -r1.5 vm_zone.h
--- vm/vm_zone.h 27 Aug 2003 01:43:08 -0000 1.5
+++ vm/vm_zone.h 25 Oct 2004 08:49:21 -0000
@@ -21,6 +21,7 @@
#define ZONE_INTERRUPT 0x0001 /* If you need to allocate at int time */
#define ZONE_PANICFAIL 0x0002 /* panic if the zalloc fails */
+#define ZONE_SPECIAL 0x0004 /* special vm_map_entry zone, see zget() */
#define ZONE_BOOT 0x0010 /* Internal flag used by zbootinit */
#define ZONE_USE_RESERVE 0x0020 /* use reserve memory if necessary */