vhost: Fix mmap size calculation
I hit a bug where a requested size of 1G produced an aligned size of '1G + 2M', which caused an OOM error. The previous code added one huge page size even when the memory size was already aligned; the fix is to add `page_sz - 1` (instead of `page_sz`) before masking, the standard round-up-to-alignment idiom. Change-Id: Idd3aa0e9b893fb3efccba6ae1c7161e26d3f9456 Signed-off-by: Pierre Pfister <ppfister@cisco.com>
This commit is contained in:

committed by
Damjan Marion

parent
1985c93bd7
commit
c4352553ee
@ -303,7 +303,7 @@ unmap_all_mem_regions (vhost_user_intf_t * vui)
|
||||
|
||||
ssize_t map_sz = (vui->regions[i].memory_size +
|
||||
vui->regions[i].mmap_offset +
|
||||
page_sz) & ~(page_sz - 1);
|
||||
page_sz - 1) & ~(page_sz - 1);
|
||||
|
||||
r =
|
||||
munmap (vui->region_mmap_addr[i] - vui->regions[i].mmap_offset,
|
||||
@ -928,7 +928,7 @@ vhost_user_socket_read (unix_file_t * uf)
|
||||
/* align size to 2M page */
|
||||
ssize_t map_sz = (vui->regions[i].memory_size +
|
||||
vui->regions[i].mmap_offset +
|
||||
page_sz) & ~(page_sz - 1);
|
||||
page_sz - 1) & ~(page_sz - 1);
|
||||
|
||||
vui->region_mmap_addr[i] = mmap (0, map_sz, PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED, fds[i], 0);
|
||||
@ -1179,7 +1179,7 @@ vhost_user_socket_read (unix_file_t * uf)
|
||||
/* align size to 2M page */
|
||||
long page_sz = get_huge_page_size (fd);
|
||||
ssize_t map_sz =
|
||||
(msg.log.size + msg.log.offset + page_sz) & ~(page_sz - 1);
|
||||
(msg.log.size + msg.log.offset + page_sz - 1) & ~(page_sz - 1);
|
||||
|
||||
vui->log_base_addr = mmap (0, map_sz, PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED, fd, 0);
|
||||
|
Reference in New Issue
Block a user