--- a/lib/mm/memlock.c
+++ b/lib/mm/memlock.c
@@ -187,12 +187,15 @@ static void _allocate_memory(void)
 	 * memory on free(), this is good enough for our purposes.
 	 */
 	while (missing > 0) {
+#ifdef __GLIBC__
 		struct mallinfo inf = mallinfo();
 		hblks = inf.hblks;
+#endif
 
 		if ((areas[area] = malloc(_size_malloc_tmp)))
 			_touch_memory(areas[area], _size_malloc_tmp);
 
+#ifdef __GLIBC__
 		inf = mallinfo();
 
 		if (hblks < inf.hblks) {
@@ -202,9 +205,12 @@ static void _allocate_memory(void)
 			free(areas[area]);
 			_size_malloc_tmp /= 2;
 		} else {
+#endif
 			++ area;
 			missing -= _size_malloc_tmp;
+#ifdef __GLIBC__
 		}
+#endif
 
 		if (area == max_areas && missing > 0) {
 			/* Too bad. Warn the user and proceed, as things are
@@ -525,8 +531,13 @@ static void _lock_mem(struct cmd_context
 	 * will not block memory locked thread
 	 * Note: assuming _memlock_count_daemon is updated before _memlock_count
 	 */
+#ifdef __GLIBC__
 	_use_mlockall = _memlock_count_daemon ? 1 :
 		find_config_tree_bool(cmd, activation_use_mlockall_CFG, NULL);
+#else
+	/* always use mlockall on musl */
+	_use_mlockall = 1;
+#endif
 
 	if (!_use_mlockall) {
 		if (!*_procselfmaps &&
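
Standalone sketch (not part of the patch above, and not lvm2 code; the file name sketch.c is just for illustration). It shows the same guard the hunks apply: mallinfo() is a glibc extension declared in <malloc.h>, and musl defines neither __GLIBC__ nor mallinfo(), so the heap probe is compiled only on glibc while other libcs fall through to the plain path.

/* sketch.c — illustrative only; demonstrates the #ifdef __GLIBC__ pattern.
 * On glibc the program probes mallinfo().hblks around a large malloc();
 * on musl (no __GLIBC__, no mallinfo()) it skips the probe entirely.
 */
#include <stdio.h>
#include <stdlib.h>
#ifdef __GLIBC__
#include <malloc.h>	/* struct mallinfo, mallinfo() */
#endif

int main(void)
{
	void *block;
#ifdef __GLIBC__
	/* glibc path: remember how many chunks were obtained via mmap() */
	int hblks_before = mallinfo().hblks;
#endif

	if (!(block = malloc(1024 * 1024)))
		return 1;

#ifdef __GLIBC__
	if (mallinfo().hblks > hblks_before)
		printf("allocation was satisfied by mmap()\n");
	else
		printf("allocation came from the heap (brk)\n");
#else
	/* non-glibc libc (e.g. musl): nothing to probe, just proceed */
	printf("mallinfo() not available; skipping heap probe\n");
#endif

	free(block);
	return 0;
}

Built against glibc the probe branch is compiled in (newer glibc may emit a deprecation warning for mallinfo()); built against musl only the fallback message is compiled, which mirrors why the patch forces _use_mlockall = 1 on non-glibc systems.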