/* forward declarations for the memory management types defined below */
8 typedef struct Range Range
;
9 typedef struct Area Area
;
10 typedef struct Filemap Filemap
;
11 typedef struct Futex Futex
;
12 typedef struct Seg Seg
;
13 typedef struct Space Space
;
13 /* keep in order, lowest base address first */
/* NOTE(review): per-type segment names, indexed by the segment type
 * constants and used as segname[type] in trace()/panic() messages below.
 * Assumes SEGMAX == 4 and that the constant values match this order —
 * confirm against the (not visible here) type enum. */
24 static char *segname
[SEGMAX
] = { "data", "private", "shared", "stack" };
61 Area
*next
; /* next higher area */
62 Area
*prev
; /* previous lower area */
63 Seg
*seg
; /* segment we belong to */
77 ulong limit
; /* maximum address this segment can grow */
79 Area
*areas
; /* ordered by address */
81 int type
; /* SEGDATA, SEGSHARED, SEGPRIVATE, SEGSTACK */
105 panic("kmalloc: out of memory");
106 setmalloctag(p
, getcallerpc(&size
));
110 krealloc(void *ptr
, int size
)
114 p
= realloc(ptr
, size
);
117 panic("krealloc: out of memory");
118 setmalloctag(p
, getcallerpc(&ptr
));
124 kmallocz(int size
, int zero
)
128 p
= mallocz(size
, zero
);
130 panic("kmallocz: out of memory");
131 setmalloctag(p
, getcallerpc(&size
));
145 setmalloctag(p
, getcallerpc(&s
));
150 ksmprint(char *fmt
, ...)
159 n
= vsnprint(p
, n
, fmt
, args
);
161 if((p
= realloc(p
, n
+1)) == nil
)
162 panic("ksmprint: out of memory");
163 setmalloctag(p
, getcallerpc(&fmt
));
168 pagealign(ulong addr
)
173 return (addr
+ m
) & ~m
;
177 syncarea(Area
*a
, Range r
)
179 if(a
->filemap
== nil
)
181 if(a
->filemap
->file
== nil
)
183 if((a
->prot
& PROT_WRITE
) == 0)
186 if(r
.base
< a
->addr
.base
)
187 r
.base
= a
->addr
.base
;
188 if(r
.top
> a
->addr
.top
)
190 if(r
.base
< a
->filemap
->addr
.base
)
191 r
.base
= a
->filemap
->addr
.base
;
192 if(r
.top
> a
->filemap
->addr
.top
)
193 r
.top
= a
->filemap
->addr
.top
;
194 pwritefile(a
->filemap
->file
, (void*)r
.base
, r
.top
- r
.base
,
195 (r
.base
- a
->filemap
->addr
.base
) + a
->filemap
->offset
);
199 linkarea(Seg
*seg
, Area
*a
)
207 for(p
= seg
->areas
; p
&& p
->next
; p
=p
->next
)
208 if(p
->addr
.base
> a
->addr
.base
)
211 if(p
->addr
.base
> a
->addr
.base
){
213 if(a
->prev
= p
->prev
)
230 if(r
= a
->seg
->freearea
){
231 a
->seg
->freearea
= r
->next
;
233 r
= kmalloc(sizeof(Area
));
240 if(r
->filemap
= a
->filemap
)
255 syncarea(a
, a
->addr
);
260 f
->next
= seg
->freefilemap
;
261 seg
->freefilemap
= f
;
265 if(a
->futex
= x
->next
)
266 x
->next
->link
= &a
->futex
;
272 if(seg
->areas
= a
->next
)
275 if(a
->prev
->next
= a
->next
)
276 a
->next
->prev
= a
->prev
;
279 a
->next
= seg
->freearea
;
284 allocseg(int type
, Range addr
, ulong limit
, int attr
, char *class)
289 trace("allocseg(): segattach %s segment %lux-%lux", segname
[type
], addr
.base
, addr
.top
);
290 if(segattach(attr
, class, (void*)addr
.base
, addr
.top
- addr
.base
) != (void*)addr
.base
)
291 panic("allocseg: segattach %s segment: %r", segname
[type
]);
294 seg
= kmallocz(sizeof(Seg
), 1);
304 dupseg(Seg
*old
, int copy
)
315 new = allocseg(old
->type
, old
->addr
, old
->limit
, 0, nil
);
317 for(a
=old
->areas
; a
; a
=a
->next
){
332 getspace(Space
*old
, int copy
)
343 new = kmallocz(sizeof(Space
), 1);
347 for(t
=0; t
<SEGMAX
; t
++){
348 if(seg
= old
->seg
[t
]){
350 new->seg
[t
] = dupseg(seg
, t
!= SEGSHARED
);
361 putspace(Space
*space
)
372 for(t
=0; t
<SEGMAX
; t
++){
373 if(seg
= space
->seg
[t
]){
374 addr
= (void*)seg
->addr
.base
;
377 /* mark all areas as free */
378 while(a
= seg
->areas
)
381 /* clear the free lists */
382 while(a
= seg
->freearea
){
383 seg
->freearea
= a
->next
;
386 while(f
= seg
->freefilemap
){
387 seg
->freefilemap
= f
->next
;
390 while(x
= seg
->freefutex
){
391 seg
->freefutex
= x
->next
;
396 if(segdetach(addr
) < 0)
397 panic("putspace: segdetach %s segment: %r", segname
[t
]);
404 canmerge(Area
*a
, Area
*b
)
406 return a
->filemap
==nil
&&
416 if(a
->prev
&& a
->prev
->addr
.top
== a
->addr
.base
&& canmerge(a
->prev
, a
)){
417 a
->addr
.base
= a
->prev
->addr
.base
;
420 if(a
->next
&& a
->next
->addr
.base
== a
->addr
.top
&& canmerge(a
->next
, a
)){
421 a
->addr
.top
= a
->next
->addr
.top
;
427 findhole(Seg
*seg
, Range
*r
, int fixed
)
435 z
= r
->top
- r
->base
;
437 h
.base
= seg
->addr
.base
;
440 if((h
.top
= a
? a
->addr
.base
: seg
->addr
.top
) > h
.base
) {
444 if((r
->base
>= h
.base
) && (r
->top
<= h
.top
))
448 if((hz
>= z
) && (hz
< m
)) {
458 h
.base
= a
->addr
.top
;
461 if(!fixed
&& (m
!= ~0))
469 /* wake up all futexes in range and unlink from area */
471 wakefutexarea(Area
*a
, Range addr
)
475 for(fu
= a
->futex
; fu
; fu
= x
){
477 if((ulong
)fu
->addr
>= addr
.base
&& (ulong
)fu
->addr
< addr
.top
){
483 trace("wakefutexarea: fu=%p addr=%p", fu
, fu
->addr
);
490 makehole(Seg
*seg
, Range r
)
495 for(a
= seg
->areas
; a
; a
= x
){
498 if(a
->addr
.top
<= r
.base
)
500 if(a
->addr
.base
>= r
.top
)
504 if(f
.base
< a
->addr
.base
)
505 f
.base
= a
->addr
.base
;
506 if(f
.top
> a
->addr
.top
)
510 if(f
.base
== a
->addr
.base
){
511 if(f
.top
== a
->addr
.top
){
514 a
->addr
.base
= f
.top
;
516 } else if(f
.top
== a
->addr
.top
){
517 a
->addr
.top
= f
.base
;
520 b
->addr
.base
= f
.top
;
522 a
->addr
.top
= f
.base
;
526 if(segfree((void*)f
.base
, f
.top
- f
.base
) < 0)
527 panic("makehole: segfree %s segment: %r", segname
[seg
->type
]);
532 addr2seg(Space
*space
, ulong addr
)
537 for(t
=0; t
<SEGMAX
; t
++){
538 if((seg
= space
->seg
[t
]) == nil
)
541 if((addr
>= seg
->addr
.base
) && (addr
< seg
->addr
.top
))
550 addr2area(Seg
*seg
, ulong addr
)
554 for(a
=seg
->areas
; a
; a
=a
->next
)
555 if((addr
>= a
->addr
.base
) && (addr
< a
->addr
.top
))
561 okaddr(void *ptr
, int len
, int write
)
573 if(space
= current
->mem
){
575 if(seg
= addr2seg(space
, addr
)){
576 while(a
= addr2area(seg
, addr
)){
578 if((a
->prot
& PROT_WRITE
) == 0)
581 if((a
->prot
& PROT_READ
) == 0)
584 if((ulong
)ptr
+ len
<= a
->addr
.top
){
595 trace("okaddr(%lux-%lux, %d) -> %d", addr
, addr
+len
, write
, ok
);
600 unmapspace(Space
*space
, Range r
)
605 for(t
=0; t
<SEGMAX
; t
++){
606 if((seg
= space
->seg
[t
]) == nil
)
609 if(seg
->addr
.base
>= r
.top
){
613 if(seg
->addr
.top
> r
.base
)
620 mapspace(Space
*space
, Range r
, int flags
, int prot
, int *perr
)
627 if(flags
& MAP_PRIVATE
){
628 if(r
.base
>= space
->seg
[SEGSTACK
]->addr
.base
){
630 } else if(r
.base
>= space
->seg
[SEGDATA
]->addr
.base
&&
631 r
.base
< space
->seg
[SEGDATA
]->limit
){
640 if((seg
= space
->seg
[t
]) == nil
)
644 if((r
.base
>= seg
->addr
.base
) && (r
.top
<= seg
->limit
)){
645 if(r
.base
>= seg
->addr
.top
)
649 if(f
.top
> seg
->addr
.top
)
650 f
.top
= seg
->addr
.top
;
651 if(findhole(seg
, &f
, 1))
653 if(flags
& MAP_FIXED
){
654 if(seg
->type
== SEGSHARED
){
655 trace("mapspace(): cant make hole %lux-%lux in shared segment",
664 if(flags
& MAP_FIXED
){
665 trace("mapspace(): no free hole for fixed mapping %lux-%lux in %s segment",
666 r
.base
, r
.top
, segname
[seg
->type
]);
670 if(findhole(seg
, &r
, 0))
674 r
.base
= seg
->addr
.top
;
678 trace("mapspace(): addr %lux-%lux", r
.base
, r
.top
);
680 if(r
.top
> seg
->addr
.top
){
681 if(r
.top
> seg
->limit
){
682 trace("mapspace(): area top %lux over %s segment limit %lux",
683 r
.top
, segname
[seg
->type
], seg
->limit
);
686 trace("mapspace(): segbrk %s segment %lux-%lux -> %lux",
687 segname
[seg
->type
], seg
->addr
.base
, seg
->addr
.top
, r
.top
);
688 if(segbrk((void*)seg
->addr
.base
, (void*)r
.top
) == (void*)-1){
689 trace("mapspace(): segbrk failed: %r");
692 seg
->addr
.top
= r
.top
;
695 if(a
= seg
->freearea
){
696 seg
->freearea
= a
->next
;
698 a
= kmalloc(sizeof(Area
));
707 /* keep seg locked */
713 if(perr
) *perr
= -ENOMEM
;
718 brkspace(Space
*space
, ulong bk
)
725 if((seg
= space
->seg
[SEGDATA
]) == nil
)
729 if(space
->brk
< seg
->addr
.base
)
730 space
->brk
= seg
->addr
.top
;
732 if(bk
< seg
->addr
.base
)
735 old
= pagealign(space
->brk
);
745 unmapspace(space
, r
);
750 trace("brkspace(): new mapping %lux-%lux", r
.base
, r
.top
);
751 for(a
= addr2area(seg
, old
- PAGESIZE
); a
; a
= a
->next
){
752 if(a
->addr
.top
<= r
.base
)
754 if(a
->addr
.base
> r
.top
+ PAGESIZE
)
757 trace("brkspace(): mapping %lux-%lux is in the way", a
->addr
.base
, a
->addr
.top
);
763 a
= mapspace(space
, r
,
764 MAP_ANONYMOUS
|MAP_PRIVATE
|MAP_FIXED
,
765 PROT_READ
|PROT_WRITE
|PROT_EXEC
, nil
);
775 if(space
->brk
!= bk
){
776 trace("brkspace: set new brk %lux", bk
);
788 remapspace(Space
*space
, ulong addr
, ulong oldlen
, ulong newlen
, ulong newaddr
, int flags
)
795 if(pagealign(addr
) != addr
)
798 oldlen
= pagealign(oldlen
);
799 newlen
= pagealign(newlen
);
801 if((addr
+ oldlen
) < addr
)
803 if((addr
+ newlen
) <= addr
)
807 if(flags
& MREMAP_FIXED
){
808 if(pagealign(newaddr
) != newaddr
)
810 if((flags
& MREMAP_MAYMOVE
) == 0)
812 if((newaddr
<= addr
) && ((newaddr
+newlen
) > addr
))
814 if((addr
<= newaddr
) && ((addr
+oldlen
) > newaddr
))
816 move
= (newaddr
!= addr
);
820 r
.base
= addr
+ newlen
;
821 r
.top
= addr
+ oldlen
;
823 unmapspace(space
, r
);
828 if((newlen
== oldlen
) && !move
)
831 if((seg
= addr2seg(space
, addr
)) == nil
)
834 if((a
= addr2area(seg
, addr
)) == nil
)
836 if(a
->addr
.top
< (addr
+ oldlen
))
841 if((addr
+ oldlen
) != a
->addr
.top
)
843 if((addr
+ newlen
) > seg
->limit
)
846 if((addr
+ newlen
) > a
->next
->addr
.base
)
849 if((addr
+ newlen
) > seg
->addr
.top
){
850 trace("remapspace(): segbrk %s segment %lux-%lux -> %lux",
851 segname
[seg
->type
], seg
->addr
.base
, seg
->addr
.top
, (addr
+ newlen
));
852 if(segbrk((void*)seg
->addr
.base
, (void*)(addr
+ newlen
)) == (void*)-1){
853 trace("remapspace(): segbrk: %r");
857 seg
->addr
.top
= (addr
+ newlen
);
859 a
->addr
.top
= (addr
+ newlen
);
866 trace("remapspace(): domove not implemented");
878 syncspace(Space
*space
, Range r
)
883 if(seg
= addr2seg(space
, r
.base
)){
884 for(a
= addr2area(seg
, r
.base
); a
; a
=a
->next
){
885 if(r
.base
>= a
->addr
.top
)
899 space
= current
->mem
;
900 a
= space
->seg
[SEGSTACK
]->addr
.top
;
901 size
= pagealign(size
);
902 a
= sys_mmap(a
- size
, size
,
903 PROT_READ
|PROT_WRITE
,
904 MAP_PRIVATE
|MAP_ANONYMOUS
|MAP_FIXED
, -1, 0);
908 return (void*)(a
+ size
);
919 space
= current
->mem
;
920 base
= pagealign(base
);
921 top
= space
->seg
[SEGSTACK
]->addr
.base
- PAGESIZE
;
923 for(t
=0; t
<SEGMAX
; t
++){
924 if(space
->seg
[t
] == nil
){
930 r
.base
= base
+ 0x10000000;
933 r
.base
= top
- 0x10000000;
936 r
.top
= r
.base
+ PAGESIZE
;
937 space
->seg
[t
] = allocseg(t
, r
, r
.top
, 0, (t
== SEGSHARED
) ? "shared" : "memory");
939 if(t
> 0 && space
->seg
[t
-1])
940 space
->seg
[t
-1]->limit
= space
->seg
[t
]->addr
.base
- PAGESIZE
;
945 * unmapuserspace is called from kprocfork to get rid of
946 * the linux memory segments used by the calling process
947 * before current is set to zero. we just segdetach() all those
948 * segments but keep the data structures valid for the calling
958 space
= current
->mem
;
960 for(t
=0; t
<SEGMAX
; t
++){
961 if((seg
= space
->seg
[t
]) == nil
)
963 if(segdetach((void*)seg
->addr
.base
) < 0)
964 panic("unmapuserspace: segdetach %s segment: %r", segname
[seg
->type
]);
970 * we write the segment out into a file, detach it and reattach
971 * a new one and read the contents back. i'm surprised that
972 * this even works seamlessly with the Plan9 Bss! :-)
975 convertseg(Range r
, ulong attr
, char *class)
983 snprint(name
, sizeof(name
), "/tmp/seg%s%d", class, getpid());
984 fd
= create(name
, ORDWR
|ORCLOSE
, 0600);
986 panic("convertseg: cant create %s: %r", name
);
988 len
= r
.top
- r
.base
;
991 n
= write(fd
, (void*)r
.base
, len
);
993 panic("convertseg: write: %r");
996 /* copy string to stack because its memory gets detached :-) */
997 strncpy(name
, class, sizeof(name
));
999 trace("detaching %lux-%lux", r
.base
, r
.top
);
1001 /* point of no return */
1002 if(segdetach((void*)r
.base
) < 0)
1003 panic("convertseg: segdetach: %r");
1004 if(segattach(attr
, name
, (void*)r
.base
, len
) != (void*)r
.base
)
1010 * we use pread directly to avoid hitting profiling code until
1011 * data segment is read back again. pread is an unprofiled syscall
1014 n
= pread(fd
, (void*)(r
.base
+ p
), len
- p
, (vlong
)p
);
1020 /* anything normal again */
1021 trace("segment %lux-%lux reattached as %s", r
.base
, r
.top
, class);
1034 static int firsttime
= 1;
1036 space
= kmallocz(sizeof(Space
), 1);
1039 snprint(buf
, sizeof(buf
), "/proc/%d/segment", getpid());
1040 if((fd
= open(buf
, OREAD
)) < 0)
1041 panic("initspace: cant open %s: %r", buf
);
1043 n
= 10 + 9 + 9 + 4 + 1;
1045 while(readn(fd
, buf
, n
)==n
){
1054 r
.base
= strtoul(&buf
[9], nil
, 16);
1055 r
.top
= strtoul(&buf
[19], nil
, 16);
1057 trace("initspace(): %s %lux-%lux", name
, r
.base
, r
.top
);
1061 * convert Plan9 data+bss segments into shared segments so
1062 * that the memory of emulator data structures gets shared across
1063 * all processes. This only happens if initspace() is called the first time.
1065 if(strstr(name
, "Data")==name
)
1066 convertseg(r
, 0, "shared");
1067 if(strstr(name
, "Bss")==name
)
1068 convertseg(r
, 0, "shared");
1071 if(strstr(name
, "Stack")==name
){
1072 x
.top
= r
.base
- PAGESIZE
;
1073 x
.base
= x
.top
- pagealign((MAXPROC
/ 4) * USTACK
);
1082 /* allocate the linux stack */
1083 space
->seg
[SEGSTACK
] = allocseg(SEGSTACK
, x
, x
.top
, 0, "memory");
1085 current
->mem
= space
;
1092 if(space
= current
->mem
){
1098 void clonemem(Uproc
*new, int copy
)
1102 if((space
= current
->mem
) == nil
){
1106 new->mem
= getspace(space
, copy
);
1109 ulong
procmemstat(Uproc
*proc
, ulong
*pdat
, ulong
*plib
, ulong
*pshr
, ulong
*pstk
, ulong
*pexe
)
1121 if((space
= proc
->mem
) == nil
)
1126 for(i
=0; i
<SEGMAX
; i
++){
1129 if((seg
= space
->seg
[i
]) == nil
)
1132 for(a
= seg
->areas
; a
; a
= a
->next
){
1133 z
= a
->addr
.top
- a
->addr
.base
;
1151 if(pexe
&& (a
->prot
& PROT_EXEC
))
1162 struct linux_mmap_args
{
1172 sys_linux_mmap(void *a
)
1174 struct linux_mmap_args
*p
= a
;
1176 if(pagealign(p
->offset
) != p
->offset
)
1185 p
->offset
/ PAGESIZE
);
1189 sys_mmap(ulong addr
, ulong len
, int prot
, int flags
, int fd
, ulong pgoff
)
1200 trace("sys_mmap(%lux, %lux, %d, %d, %d, %lux)", addr
, len
, prot
, flags
, fd
, pgoff
);
1202 if(pagealign(addr
) != addr
)
1203 return (ulong
)-EINVAL
;
1206 r
.top
= addr
+ pagealign(len
);
1208 return (ulong
)-EINVAL
;
1211 if((flags
& MAP_ANONYMOUS
)==0)
1212 if((file
= fdgetfile(fd
))==nil
)
1213 return (ulong
)-EBADF
;
1215 space
= current
->mem
;
1217 if((a
= mapspace(space
, r
, flags
, prot
, &e
)) == nil
){
1226 if(flags
& MAP_ANONYMOUS
){
1234 o
= pgoff
* PAGESIZE
;
1236 if(f
= seg
->freefilemap
)
1237 seg
->freefilemap
= f
->next
;
1239 f
= kmalloc(sizeof(Filemap
));
1243 f
->path
= kstrdup(file
->path
);
1245 if((f
->mode
= file
->mode
) != O_RDONLY
){
1246 f
->file
= getfile(file
);
1254 trace("map %s [%lux-%lux] at [%lux-%lux]", file
->path
, o
, o
+ (r
.top
- r
.base
), r
.base
, r
.top
);
1257 while(addr
< r
.top
){
1258 n
= preadfile(file
, (void*)addr
, r
.top
- addr
, o
);
1262 trace("read failed at offset %lux for address %lux failed: %r", o
, addr
);
1274 int sys_munmap(ulong addr
, ulong len
)
1279 trace("sys_munmap(%lux, %lux)", addr
, len
);
1281 if(pagealign(addr
) != addr
)
1284 r
.top
= addr
+ pagealign(len
);
1288 space
= current
->mem
;
1290 unmapspace(current
->mem
, r
);
1302 trace("sys_brk(%lux)", bk
);
1304 space
= current
->mem
;
1306 a
= brkspace(space
, bk
);
1312 int sys_mprotect(ulong addr
, ulong len
, int prot
)
1319 trace("sys_mprotect(%lux, %lux, %lux)", addr
, len
, (ulong
)prot
);
1321 len
= pagealign(len
);
1322 if(pagealign(addr
) != addr
)
1328 space
= current
->mem
;
1330 if(seg
= addr2seg(space
, addr
)){
1331 for(a
= addr2area(seg
, addr
); a
!=nil
; a
=a
->next
){
1332 if(addr
+ len
<= a
->addr
.base
)
1337 wakefutexarea(a
, a
->addr
);
1338 if(a
->addr
.base
< addr
){
1340 a
->addr
.base
= addr
;
1344 if(a
->addr
.top
> addr
+ len
){
1346 a
->addr
.top
= addr
+ len
;
1347 b
->addr
.base
= addr
+ len
;
1350 trace("%lux-%lux %lux -> %lux", a
->addr
.base
, a
->addr
.top
, (ulong
)a
->prot
, (long)prot
);
1360 int sys_msync(ulong addr
, ulong len
, int flags
)
1365 trace("sys_msync(%lux, %lux, %x)", addr
, len
, flags
);
1367 if(pagealign(addr
) != addr
)
1370 r
.top
= addr
+ pagealign(len
);
1374 space
= current
->mem
;
1376 syncspace(space
, r
);
1383 sys_mremap(ulong addr
, ulong oldlen
, ulong newlen
, int flags
, ulong newaddr
)
1388 trace("sys_mremap(%lux, %lux, %lux, %x, %lux)",
1389 addr
, oldlen
, newlen
, flags
, newaddr
);
1391 space
= current
->mem
;
1393 r
= remapspace(space
, addr
, oldlen
, newlen
, newaddr
, flags
);
1407 int sys_futex(ulong
*addr
, int op
, int val
, void *ptime
, ulong
*addr2
, int val3
)
1416 trace("sys_futex(%p, %d, %d, %p, %p, %d)", addr
, op
, val
, ptime
, addr2
, val3
);
1420 if((space
= current
->mem
) == 0)
1424 if((seg
= addr2seg(space
, (ulong
)addr
)) == nil
){
1429 if((a
= addr2area(seg
, (ulong
)addr
)) == nil
)
1431 for(fu
= a
->futex
; fu
; fu
= fu
->next
)
1432 if(fu
->addr
== addr
)
1437 trace("sys_futex(): FUTEX_WAIT futex=%p addr=%p", fu
, addr
);
1440 if(fu
= seg
->freefutex
){
1441 seg
->freefutex
= fu
->next
;
1443 fu
= kmallocz(sizeof(Futex
), 1);
1447 if(fu
->next
= a
->futex
)
1448 fu
->next
->link
= &fu
->next
;
1449 fu
->link
= &a
->futex
;
1458 struct linux_timespec
*ts
= ptime
;
1463 if(current
->restart
->syscall
){
1464 timeout
= current
->restart
->futex
.timeout
;
1466 timeout
= now
+ (vlong
)ts
->tv_sec
* 1000000000LL + ts
->tv_nsec
;
1469 current
->timeout
= timeout
;
1479 err
= sleepq(fu
, seg
, 1);
1483 current
->timeout
= 0;
1486 if(err
== -ERESTART
)
1487 current
->restart
->futex
.timeout
= timeout
;
1491 if(*fu
->link
= fu
->next
)
1492 fu
->next
->link
= fu
->link
;
1496 fu
->next
= seg
->freefutex
;
1497 seg
->freefutex
= fu
;
1502 trace("sys_futex(): FUTEX_WAKE futex=%p addr=%p", fu
, addr
);
1503 err
= fu
? wakeq(fu
, val
< 0 ? 0 : val
) : 0;
1506 case FUTEX_CMP_REQUEUE
:
1507 trace("sys_futex(): FUTEX_CMP_REQUEUE futex=%p addr=%p", fu
, addr
);
1512 trace("sys_futex(): FUTEX_REQUEUE futex=%p addr=%p", fu
, addr
);
1514 err
= fu
? wakeq(fu
, val
< 0 ? 0 : val
) : 0;
1518 /* BUG: fu2 has to be in the same segment as fu */
1519 if(a
= addr2area(seg
, (ulong
)addr2
)){
1520 for(fu2
= a
->futex
; fu2
; fu2
= fu2
->next
){
1521 if(fu2
->addr
== addr2
){
1522 err
+= requeue(fu
, fu2
, val2
);