svn commit: trunk/uClibc: libc/stdlib/malloc libc/stdlib/malloc-si etc...

kraj at uclibc.org kraj at uclibc.org
Sat Oct 11 08:52:58 UTC 2008


Author: kraj
Date: 2008-10-11 01:52:58 -0700 (Sat, 11 Oct 2008)
New Revision: 23660

Log:
Fix bug 4994 (hangs on read()). I have tested the patch extensively on ARM/LT.old.
Thank you Chase Douglas for reporting it and for the patch.
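
Background: with linuxthreads.old, fork() snapshots the address space while
other threads may be holding the malloc locks.  Those threads do not exist in
the child, so an inherited locked mutex stays locked forever, and the child
blocks the next time it needs that lock; the hang can surface in seemingly
unrelated calls, such as the read() from the report.  The fix below takes
every malloc-family lock around fork() and re-initializes the locks in the
child.  A minimal reproduction of the failure mode (illustrative sketch, not
uClibc code; the sleeps are a crude stand-in for real synchronization):

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Simulates a thread that is inside malloc when fork() happens.  */
    static void *holder(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        sleep(2);                     /* still holding at fork time */
        pthread_mutex_unlock(&lock);  /* the child never sees this unlock */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, holder, NULL);
        sleep(1);                     /* let the holder take the lock */
        if (fork() == 0) {
            pthread_mutex_lock(&lock);  /* hangs: owner absent in child */
            _exit(0);
        }
        pthread_join(t, NULL);
        return 0;
    }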



Modified:
   trunk/uClibc/libc/stdlib/malloc-simple/alloc.c
   trunk/uClibc/libc/stdlib/malloc/free.c
   trunk/uClibc/libc/stdlib/malloc/heap.h
   trunk/uClibc/libc/stdlib/malloc/heap_alloc.c
   trunk/uClibc/libc/stdlib/malloc/heap_alloc_at.c
   trunk/uClibc/libc/stdlib/malloc/heap_free.c
   trunk/uClibc/libc/stdlib/malloc/malloc.c
   trunk/uClibc/libc/stdlib/malloc/malloc.h
   trunk/uClibc/libc/stdlib/malloc/memalign.c
   trunk/uClibc/libc/stdlib/malloc/realloc.c
   trunk/uClibc/libpthread/linuxthreads.old/ptfork.c
   trunk/uClibc/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h


Changeset:
Modified: trunk/uClibc/libc/stdlib/malloc/free.c
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/free.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/free.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -22,7 +22,7 @@
 #include "heap.h"
 
 static void
-free_to_heap (void *mem, struct heap *heap)
+free_to_heap (void *mem, struct heap_free_area **heap, malloc_mutex_t *heap_lock)
 {
   size_t size;
   struct heap_free_area *fa;
@@ -39,7 +39,7 @@
   size = MALLOC_SIZE (mem);
   mem = MALLOC_BASE (mem);
 
-  __heap_lock (heap);
+  __pthread_mutex_lock (heap_lock);
 
   /* Put MEM back in the heap, and get the free-area it was placed in.  */
   fa = __heap_free (heap, mem, size);
@@ -48,7 +48,7 @@
      unmapped.  */
   if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
     /* Nope, nothing left to do, just release the lock.  */
-    __heap_unlock (heap);
+    __pthread_mutex_unlock (heap_lock);
   else
     /* Yup, try to unmap FA.  */
     {
@@ -81,7 +81,7 @@
 	  MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
 			start, end, end - start);
 	  __malloc_unlock_sbrk ();
-	  __heap_unlock (heap);
+	  __pthread_mutex_unlock (heap_lock);
 	  return;
 	}
 #endif
@@ -108,7 +108,7 @@
 #ifdef MALLOC_USE_SBRK
 
       /* Release the heap lock; we're still holding the sbrk lock.  */
-      __heap_unlock (heap);
+      __pthread_mutex_unlock (heap_lock);
       /* Lower the brk.  */
       sbrk (start - end);
       /* Release the sbrk lock too; now we hold no locks.  */
@@ -172,15 +172,15 @@
 	      /* We have to unlock the heap before we recurse to free the mmb
 		 descriptor, because we might be unmapping from the mmb
 		 heap.  */
-	      __heap_unlock (heap);
+              __pthread_mutex_unlock (heap_lock);
 
 	      /* Release the descriptor block we used.  */
-	      free_to_heap (mmb, &__malloc_mmb_heap);
+	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 
 	      /* Do the actual munmap.  */
 	      munmap ((void *)mmb_start, mmb_end - mmb_start);
 
-	      __heap_lock (heap);
+              __pthread_mutex_lock (heap_lock);
 
 #  ifdef __UCLIBC_HAS_THREADS__
 	      /* In a multi-threaded program, it's possible that PREV_MMB has
@@ -213,7 +213,7 @@
 	}
 
       /* Finally release the lock for good.  */
-      __heap_unlock (heap);
+      __pthread_mutex_unlock (heap_lock);
 
       MALLOC_MMB_DEBUG_INDENT (-1);
 
@@ -243,7 +243,7 @@
 	}
 
       /* Release the heap lock before we do the system call.  */
-      __heap_unlock (heap);
+      __pthread_mutex_unlock (heap_lock);
 
       if (unmap_end > unmap_start)
 	/* Finally, actually unmap the memory.  */
@@ -260,5 +260,5 @@
 void
 free (void *mem)
 {
-  free_to_heap (mem, &__malloc_heap);
+  free_to_heap (mem, &__malloc_heap, &__malloc_heap_lock);
 }

Modified: trunk/uClibc/libc/stdlib/malloc/heap.h
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/heap.h	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/heap.h	2008-10-11 08:52:58 UTC (rev 23660)
@@ -29,32 +29,12 @@
 #define HEAP_GRANULARITY	(__alignof__ (HEAP_GRANULARITY_TYPE))
 
 
-/* A heap is a collection of memory blocks, from which smaller blocks
-   of memory can be allocated.  */
-struct heap
-{
-  /* A list of memory in the heap available for allocation.  */
-  struct heap_free_area *free_areas;
-
-#ifdef HEAP_USE_LOCKING
-  /* A lock that can be used by callers to control access to the heap.
-     The heap code _does not_ use this lock, it's merely here for the
-     convenience of users!  */
-  pthread_mutex_t lock;
-#endif
-};
-
 /* The HEAP_INIT macro can be used as a static initializer for a heap
    variable.  The HEAP_INIT_WITH_FA variant is used to initialize a heap
    with an initial static free-area; its argument FA should be declared
    using HEAP_DECLARE_STATIC_FREE_AREA.  */
-#ifdef HEAP_USE_LOCKING
-# define HEAP_INIT 		{ 0, PTHREAD_MUTEX_INITIALIZER }
-# define HEAP_INIT_WITH_FA(fa)	{ &fa._fa, PTHREAD_MUTEX_INITIALIZER }
-#else
-# define HEAP_INIT 		{ 0 }
-# define HEAP_INIT_WITH_FA(fa) 	{ &fa._fa }
-#endif
+# define HEAP_INIT 		0
+# define HEAP_INIT_WITH_FA(fa)	&fa._fa
 
 /* A free-list area `header'.  These are actually stored at the _ends_ of
    free areas (to make allocating from the beginning of the area simpler),
@@ -129,27 +109,23 @@
 #endif
 
 /* Output a text representation of HEAP to stderr, labelling it with STR.  */
-extern void __heap_dump (struct heap *heap, const char *str);
+extern void __heap_dump (struct heap_free_area *heap, const char *str);
 
 /* Do some consistency checks on HEAP.  If they fail, output an error
    message to stderr, and exit.  STR is printed with the failure message.  */
-extern void __heap_check (struct heap *heap, const char *str);
+extern void __heap_check (struct heap_free_area *heap, const char *str);
 
 
-#define __heap_lock(heap)	__pthread_mutex_lock (&(heap)->lock)
-#define __heap_unlock(heap)	__pthread_mutex_unlock (&(heap)->lock)
-
-
 /* Delete the free-area FA from HEAP.  */
 static __inline__ void
-__heap_delete (struct heap *heap, struct heap_free_area *fa)
+__heap_delete (struct heap_free_area **heap, struct heap_free_area *fa)
 {
   if (fa->next)
     fa->next->prev = fa->prev;
   if (fa->prev)
     fa->prev->next = fa->next;
   else
-    heap->free_areas = fa->next;
+    *heap = fa->next;
 }
 
 
@@ -157,7 +133,7 @@
    HEAP.  PREV and NEXT may be 0; if PREV is 0, FA is installed as the
    first free-area.  */
 static __inline__ void
-__heap_link_free_area (struct heap *heap, struct heap_free_area *fa,
+__heap_link_free_area (struct heap_free_area **heap, struct heap_free_area *fa,
 		       struct heap_free_area *prev,
 		       struct heap_free_area *next)
 {
@@ -167,7 +143,7 @@
   if (prev)
     prev->next = fa;
   else
-    heap->free_areas = fa;
+    *heap = fa;
   if (next)
     next->prev = fa;
 }
@@ -176,14 +152,14 @@
    PREV may be 0, in which case FA is installed as the first free-area (but
    FA may not be 0).  */
 static __inline__ void
-__heap_link_free_area_after (struct heap *heap,
+__heap_link_free_area_after (struct heap_free_area **heap,
 			     struct heap_free_area *fa,
 			     struct heap_free_area *prev)
 {
   if (prev)
     prev->next = fa;
   else
-    heap->free_areas = fa;
+    *heap = fa;
   fa->prev = prev;
 }
 
@@ -192,7 +168,7 @@
    PREV and NEXT may be 0; if PREV is 0, MEM is installed as the first
    free-area.  */
 static __inline__ struct heap_free_area *
-__heap_add_free_area (struct heap *heap, void *mem, size_t size,
+__heap_add_free_area (struct heap_free_area **heap, void *mem, size_t size,
 		      struct heap_free_area *prev,
 		      struct heap_free_area *next)
 {
@@ -210,7 +186,7 @@
 /* Allocate SIZE bytes from the front of the free-area FA in HEAP, and
    return the amount actually allocated (which may be more than SIZE).  */
 static __inline__ size_t
-__heap_free_area_alloc (struct heap *heap,
+__heap_free_area_alloc (struct heap_free_area **heap,
 			struct heap_free_area *fa, size_t size)
 {
   size_t fa_size = fa->size;
@@ -234,16 +210,16 @@
 /* Allocate and return a block at least *SIZE bytes long from HEAP.
    *SIZE is adjusted to reflect the actual amount allocated (which may be
    greater than requested).  */
-extern void *__heap_alloc (struct heap *heap, size_t *size);
+extern void *__heap_alloc (struct heap_free_area **heap, size_t *size);
 
 /* Allocate SIZE bytes at address MEM in HEAP.  Return the actual size
    allocated, or 0 if we failed.  */
-extern size_t __heap_alloc_at (struct heap *heap, void *mem, size_t size);
+extern size_t __heap_alloc_at (struct heap_free_area **heap, void *mem, size_t size);
 
 /* Return the memory area MEM of size SIZE to HEAP.
    Returns the heap free area into which the memory was placed.  */
-extern struct heap_free_area *__heap_free (struct heap *heap,
+extern struct heap_free_area *__heap_free (struct heap_free_area **heap,
 					   void *mem, size_t size);
 
 /* Return true if HEAP contains absolutely no memory.  */
-#define __heap_is_empty(heap) (! (heap)->free_areas)
+#define __heap_is_empty(heap) (! (heap))
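
Note on the signature change above: with struct heap gone, the free-list head
pointer itself is the heap, so every helper that can install or unlink the
first free area takes the head as struct heap_free_area ** and writes through
it; assigning to a plain pointer parameter would only change the callee's
local copy.  The same pattern in miniature (simplified node type, not uClibc
code):

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next, *prev; };

    /* Unlink N; updating the caller's head needs double indirection.  */
    static void list_delete(struct node **head, struct node *n)
    {
        if (n->next)
            n->next->prev = n->prev;
        if (n->prev)
            n->prev->next = n->next;
        else
            *head = n->next;   /* "head = n->next" would be a no-op */
    }

    int main(void)
    {
        struct node b = { NULL, NULL };
        struct node a = { &b, NULL };
        struct node *head = &a;
        b.prev = &a;
        list_delete(&head, &a);            /* unlink the first node */
        assert(head == &b && b.prev == NULL);
        return 0;
    }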

Modified: trunk/uClibc/libc/stdlib/malloc/heap_alloc.c
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/heap_alloc.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/heap_alloc.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -20,7 +20,7 @@
    *SIZE is adjusted to reflect the actual amount allocated (which may be
    greater than requested).  */
 void *
-__heap_alloc (struct heap *heap, size_t *size)
+__heap_alloc (struct heap_free_area **heap, size_t *size)
 {
   struct heap_free_area *fa;
   size_t _size = *size;
@@ -36,7 +36,7 @@
   HEAP_DEBUG (heap, "before __heap_alloc");
 
   /* Look for a free area that can contain _SIZE bytes.  */
-  for (fa = heap->free_areas; fa; fa = fa->next)
+  for (fa = *heap; fa; fa = fa->next)
     if (fa->size >= _size)
       {
 	/* Found one!  */

Modified: trunk/uClibc/libc/stdlib/malloc/heap_alloc_at.c
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/heap_alloc_at.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/heap_alloc_at.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -19,7 +19,7 @@
 /* Allocate SIZE bytes at address MEM in HEAP.  Return the actual size
    allocated, or 0 if we failed.  */
 size_t
-__heap_alloc_at (struct heap *heap, void *mem, size_t size)
+__heap_alloc_at (struct heap_free_area **heap, void *mem, size_t size)
 {
   struct heap_free_area *fa;
   size_t alloced = 0;
@@ -29,7 +29,7 @@
   HEAP_DEBUG (heap, "before __heap_alloc_at");
 
   /* Look for a free area that can contain SIZE bytes.  */
-  for (fa = heap->free_areas; fa; fa = fa->next)
+  for (fa = *heap; fa; fa = fa->next)
     {
       void *fa_mem = HEAP_FREE_AREA_START (fa);
       if (fa_mem <= mem)

Modified: trunk/uClibc/libc/stdlib/malloc/heap_free.c
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/heap_free.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/heap_free.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -18,7 +18,7 @@
 
 /* Return the block of memory at MEM, of size SIZE, to HEAP.  */
 struct heap_free_area *
-__heap_free (struct heap *heap, void *mem, size_t size)
+__heap_free (struct heap_free_area **heap, void *mem, size_t size)
 {
   struct heap_free_area *fa, *prev_fa;
   void *end = (char *)mem + size;
@@ -32,7 +32,7 @@
      in the free-list when it becomes fragmented and long.  [A better
      implemention would use a balanced tree or something for the free-list,
      though that bloats the code-size and complexity quite a bit.]  */
-  for (prev_fa = 0, fa = heap->free_areas; fa; prev_fa = fa, fa = fa->next)
+  for (prev_fa = 0, fa = *heap; fa; prev_fa = fa, fa = fa->next)
     if (unlikely (HEAP_FREE_AREA_END (fa) >= mem))
       break;
 

Modified: trunk/uClibc/libc/stdlib/malloc/malloc.c
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/malloc.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/malloc.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -26,7 +26,8 @@
 /* The malloc heap.  We provide a bit of initial static space so that
    programs can do a little mallocing without mmaping in more space.  */
 HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
-struct heap __malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+struct heap_free_area *__malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+malloc_mutex_t __malloc_heap_lock = PTHREAD_MUTEX_INITIALIZER;
 
 #if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
 /* A lock protecting our use of sbrk.  */
@@ -43,12 +44,13 @@
    them from the main heap, but that tends to cause heap fragmentation in
    annoying ways.  */
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
-struct heap __malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
 
 static void *
-malloc_from_heap (size_t size, struct heap *heap)
+malloc_from_heap (size_t size, struct heap_free_area **heap, malloc_mutex_t *heap_lock)
 {
   void *mem;
 
@@ -57,12 +59,12 @@
   /* Include extra space to record the size of the allocated block.  */
   size += MALLOC_HEADER_SIZE;
 
-  __heap_lock (heap);
+  __pthread_mutex_lock (heap_lock);
 
   /* First try to get memory that's already in our heap.  */
   mem = __heap_alloc (heap, &size);
 
-  __heap_unlock (heap);
+  __pthread_mutex_unlock (heap_lock);
 
   if (unlikely (! mem))
     /* We couldn't allocate from the heap, so grab some more
@@ -126,7 +128,7 @@
 			(long)block, (long)block + block_size, block_size);
 
 	  /* Get back the heap lock.  */
-	  __heap_lock (heap);
+	  __pthread_mutex_lock (heap_lock);
 
 	  /* Put BLOCK into the heap.  */
 	  __heap_free (heap, block, block_size);
@@ -136,7 +138,7 @@
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 
-	  __heap_unlock (heap);
+	  __pthread_mutex_unlock (heap_lock);
 
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
@@ -148,7 +150,7 @@
 	    if (block < mmb->mem)
 	      break;
 
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -207,7 +209,7 @@
   if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
     goto oom;
 
-  mem = malloc_from_heap (size, &__malloc_heap);
+  mem = malloc_from_heap (size, &__malloc_heap, &__malloc_heap_lock);
   if (unlikely (!mem))
     {
     oom:

Modified: trunk/uClibc/libc/stdlib/malloc/malloc.h
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/malloc.h	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/malloc.h	2008-10-11 08:52:58 UTC (rev 23660)
@@ -221,4 +221,8 @@
 
 
 /* The malloc heap.  */
-extern struct heap __malloc_heap;
+extern struct heap_free_area *__malloc_heap;
+extern malloc_mutex_t __malloc_heap_lock;
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+extern malloc_mutex_t __malloc_mmb_heap_lock;
+#endif

Modified: trunk/uClibc/libc/stdlib/malloc/memalign.c
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/memalign.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/memalign.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -36,7 +36,7 @@
 {
   void *mem, *base;
   unsigned long tot_addr, tot_end_addr, addr, end_addr;
-  struct heap *heap = &__malloc_heap;
+  struct heap_free_area **heap = &__malloc_heap;
 
   /* Make SIZE something we like.  */
   size = HEAP_ADJUST_SIZE (size);

Modified: trunk/uClibc/libc/stdlib/malloc/realloc.c
===================================================================
--- trunk/uClibc/libc/stdlib/malloc/realloc.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc/realloc.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -59,9 +59,9 @@
     {
       size_t extra = new_size - size;
 
-      __heap_lock (&__malloc_heap);
-      extra = __heap_alloc_at (&__malloc_heap, base_mem + size, extra);
-      __heap_unlock (&__malloc_heap);
+      __pthread_mutex_lock (&__malloc_heap_lock);
+      extra = __heap_alloc_at (&__malloc_heap, base_mem + size, extra);
+      __pthread_mutex_unlock (&__malloc_heap_lock);
 
       if (extra)
 	/* Record the changed size.  */
@@ -82,9 +82,9 @@
   else if (new_size + MALLOC_REALLOC_MIN_FREE_SIZE <= size)
     /* Shrink the block.  */
     {
-      __heap_lock (&__malloc_heap);
-      __heap_free (&__malloc_heap, base_mem + new_size, size - new_size);
-      __heap_unlock (&__malloc_heap);
+      __pthread_mutex_lock (&__malloc_heap_lock);
+      __heap_free (&__malloc_heap, base_mem + new_size, size - new_size);
+      __pthread_mutex_unlock (&__malloc_heap_lock);
       MALLOC_SET_SIZE (base_mem, new_size);
     }
 

Modified: trunk/uClibc/libc/stdlib/malloc-simple/alloc.c
===================================================================
--- trunk/uClibc/libc/stdlib/malloc-simple/alloc.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libc/stdlib/malloc-simple/alloc.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -116,7 +116,7 @@
 #ifdef L_memalign
 
 #include <bits/uClibc_mutex.h>
-__UCLIBC_MUTEX_STATIC(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+__UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
 #define __MALLOC_LOCK		__UCLIBC_MUTEX_LOCK(__malloc_lock)
 #define __MALLOC_UNLOCK		__UCLIBC_MUTEX_UNLOCK(__malloc_lock)
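
The __UCLIBC_MUTEX_STATIC to __UCLIBC_MUTEX_INIT switch matters for the fork
handling below: it drops the `static' qualifier so __malloc_lock gets external
linkage and ptfork.c can take it across fork().  Roughly (paraphrasing
bits/uClibc_mutex.h from memory; the exact definitions may differ):

    #define __UCLIBC_MUTEX_STATIC(M, I)  static pthread_mutex_t M = I
    #define __UCLIBC_MUTEX_INIT(M, I)    pthread_mutex_t M = I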
 

Modified: trunk/uClibc/libpthread/linuxthreads.old/ptfork.c
===================================================================
--- trunk/uClibc/libpthread/linuxthreads.old/ptfork.c	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libpthread/linuxthreads.old/ptfork.c	2008-10-11 08:52:58 UTC (rev 23660)
@@ -20,6 +20,7 @@
 
 #ifdef __ARCH_USE_MMU__
 
+#include <bits/uClibc_mutex.h>
 #include <stddef.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -36,6 +37,16 @@
 static struct handler_list * pthread_atfork_parent = NULL;
 static struct handler_list * pthread_atfork_child = NULL;
 
+#ifdef __MALLOC__
+__UCLIBC_MUTEX_EXTERN(__malloc_heap_lock);
+__UCLIBC_MUTEX_EXTERN(__malloc_sbrk_lock);
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+__UCLIBC_MUTEX_EXTERN(__malloc_mmb_heap_lock);
+#endif
+#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+__UCLIBC_MUTEX_EXTERN(__malloc_lock);
+#endif
+
 static void pthread_insert_list(struct handler_list ** list,
                                 void (*handler)(void),
                                 struct handler_list * newlist,
@@ -78,6 +89,10 @@
   for (/*nothing*/; list != NULL; list = list->next) (list->handler)();
 }
 
+void __pthread_once_fork_prepare(void);
+void __pthread_once_fork_child(void);
+void __pthread_once_fork_parent(void);
+
 extern __typeof(fork) __libc_fork;
 
 pid_t __fork(void) attribute_hidden;
@@ -90,14 +105,47 @@
   prepare = pthread_atfork_prepare;
   child = pthread_atfork_child;
   parent = pthread_atfork_parent;
-  __pthread_mutex_unlock(&pthread_atfork_lock);
   pthread_call_handlers(prepare);
+
+  __pthread_once_fork_prepare();
+#ifdef __MALLOC__
+  __pthread_mutex_lock(&__malloc_sbrk_lock);
+  __pthread_mutex_lock(&__malloc_heap_lock);
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+  __pthread_mutex_lock(&__malloc_mmb_heap_lock);
+#endif
+#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+  __pthread_mutex_lock(&__malloc_lock);
+#endif
+
   pid = __libc_fork();
   if (pid == 0) {
+#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+    __libc_lock_init_recursive(__malloc_lock);
+#elif defined(__MALLOC__)
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+    __libc_lock_init_adaptive(__malloc_mmb_heap_lock);
+#endif
+    __libc_lock_init_adaptive(__malloc_heap_lock);
+    __libc_lock_init(__malloc_sbrk_lock);
+#endif
+    __libc_lock_init_adaptive(pthread_atfork_lock);
     __pthread_reset_main_thread();
     __fresetlockfiles();
+    __pthread_once_fork_child();
     pthread_call_handlers(child);
   } else {
+#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+    __pthread_mutex_unlock(&__malloc_lock);
+#elif defined(__MALLOC__)
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+    __pthread_mutex_unlock(&__malloc_mmb_heap_lock);
+#endif
+    __pthread_mutex_unlock(&__malloc_heap_lock);
+    __pthread_mutex_unlock(&__malloc_sbrk_lock);
+#endif
+    __pthread_mutex_unlock(&pthread_atfork_lock);
+    __pthread_once_fork_parent();
     pthread_call_handlers(parent);
   }
   return pid;
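
The ordering here is the classic atfork discipline: run the prepare handlers
and the pthread_once prepare hook, take the sbrk, heap, and (on broken-munmap
configurations) mmb-heap locks, then call fork().  The parent releases the
locks in reverse order; the child cannot meaningfully unlock mutexes whose
owners no longer exist, so it re-initializes them instead.  Application code
can apply the same discipline to its own locks with pthread_atfork(); a
sketch (illustrative, not uClibc code):

    #include <pthread.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

    static void prepare(void) { pthread_mutex_lock(&my_lock); }
    static void parent(void)  { pthread_mutex_unlock(&my_lock); }
    static void child(void)
    {
        /* Re-create rather than unlock: the owner may not exist here.  */
        pthread_mutex_t fresh = PTHREAD_MUTEX_INITIALIZER;
        my_lock = fresh;
    }

    int main(void)
    {
        pthread_atfork(prepare, parent, child);
        pid_t pid = fork();
        if (pid == 0) {
            pthread_mutex_lock(&my_lock);   /* safe after re-init */
            pthread_mutex_unlock(&my_lock);
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        return 0;
    }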

Modified: trunk/uClibc/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
===================================================================
--- trunk/uClibc/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h	2008-10-10 20:22:28 UTC (rev 23659)
+++ trunk/uClibc/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h	2008-10-11 08:52:58 UTC (rev 23660)
@@ -30,7 +30,7 @@
 /* Mutex type.  */
 #if defined(_LIBC) || defined(_IO_MTSAFE_IO)
 typedef pthread_mutex_t __libc_lock_t;
-typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
+typedef pthread_mutex_t __libc_lock_recursive_t;
 # ifdef __USE_UNIX98
 typedef pthread_rwlock_t __libc_rwlock_t;
 # else
@@ -132,15 +132,39 @@
 #define __libc_rwlock_init(NAME) \
   (__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0));
 
+/* Same as last but this time we initialize an adaptive mutex.  */
+#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
+#define __libc_lock_init_adaptive(NAME) \
+  ({									      \
+    (NAME).__m_count = 0;						      \
+    (NAME).__m_owner = NULL;						      \
+    (NAME).__m_kind = PTHREAD_MUTEX_ADAPTIVE_NP;			      \
+    (NAME).__m_lock.__status = 0;					      \
+    (NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT;			      \
+    0; })
+#else
+#define __libc_lock_init_adaptive(NAME) \
+  do {									      \
+    if (__pthread_mutex_init != NULL)					      \
+      {									      \
+	pthread_mutexattr_t __attr;					      \
+	__pthread_mutexattr_init (&__attr);				      \
+	__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_ADAPTIVE_NP);     \
+	__pthread_mutex_init (&(NAME), &__attr);			      \
+	__pthread_mutexattr_destroy (&__attr);				      \
+      }									      \
+  } while (0);
+#endif
+
 /* Same as last but this time we initialize a recursive mutex.  */
 #if defined _LIBC && !defined NOT_IN_libc && defined SHARED
 #define __libc_lock_init_recursive(NAME) \
   ({									      \
-    (NAME).mutex.__m_count = 0;						      \
-    (NAME).mutex.__m_owner = NULL;					      \
-    (NAME).mutex.__m_kind = PTHREAD_MUTEX_RECURSIVE_NP;			      \
-    (NAME).mutex.__m_lock.__status = 0;					      \
-    (NAME).mutex.__m_lock.__spinlock = __LT_SPINLOCK_INIT;		      \
+    (NAME).__m_count = 0;						      \
+    (NAME).__m_owner = NULL;					      \
+    (NAME).__m_kind = PTHREAD_MUTEX_RECURSIVE_NP;			      \
+    (NAME).__m_lock.__status = 0;					      \
+    (NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT;		      \
     0; })
 #else
 #define __libc_lock_init_recursive(NAME) \
@@ -150,7 +174,7 @@
 	pthread_mutexattr_t __attr;					      \
 	__pthread_mutexattr_init (&__attr);				      \
 	__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
-	__pthread_mutex_init (&(NAME).mutex, &__attr);			      \
+	__pthread_mutex_init (&(NAME), &__attr);			      \
 	__pthread_mutexattr_destroy (&__attr);				      \
       }									      \
   } while (0);
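
For reference, the fallback branch of __libc_lock_init_adaptive is ordinary
attribute-based mutex initialization; outside libc the equivalent looks like
this (a sketch assuming _GNU_SOURCE on a Linux libc, since
PTHREAD_MUTEX_ADAPTIVE_NP is a GNU extension):

    #define _GNU_SOURCE
    #include <pthread.h>

    static int init_adaptive(pthread_mutex_t *m)
    {
        pthread_mutexattr_t attr;
        int rc = pthread_mutexattr_init(&attr);
        if (rc != 0)
            return rc;
        rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
        if (rc == 0)
            rc = pthread_mutex_init(m, &attr);
        pthread_mutexattr_destroy(&attr);
        return rc;
    }

    int main(void)
    {
        pthread_mutex_t m;
        return init_adaptive(&m);
    }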



