Memory mapping
Before using Binder for IPC, user space must first initialize the Binder driver, which mainly means invoking the driver's open and mmap operations. mmap maps the memory used for Binder transactions; in Android's user space the mapping is (1M - 8K) in size, but at this point only virtual address space is reserved — the physical memory is allocated later, during data transmission.
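For context, here is a minimal sketch of the user-space side of this initialization, modeled on what libbinder's ProcessState does (the constant name and error handling are simplified for illustration, not the exact library code):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

// (1M - 8K), mirroring the BINDER_VM_SIZE used by libbinder's ProcessState
#define BINDER_VM_SIZE ((1 * 1024 * 1024) - (4096 * 2))

int main(void)
{
	// Open the driver, then map the transaction buffer read-only;
	// the kernel side of this mmap is binder_mmap() below.
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		perror("open /dev/binder");
		return 1;
	}
	void *base = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
			  MAP_PRIVATE | MAP_NORESERVE, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("binder buffer mapped at %p\n", base);
	return 0;
}
```

On the kernel side, binder_mmap() implements the mapping. Its source is as follows.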
```c
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	// The mapping may not be larger than 4M
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;
	......
	// Children created by fork() must not copy the mapping, and it may
	// never be made writable
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	......
	// Reserve a matching range of kernel virtual address space
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	......
	proc->buffer = area->addr;
	// Constant offset between the user and kernel views of the buffer
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);
	......
	// Allocate the array of physical page pointers
	proc->pages = kzalloc(sizeof(proc->pages[0]) *
			      ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			      GFP_KERNEL);
	......
	proc->buffer_size = vma->vm_end - vma->vm_start;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	// Allocate one physical page and map it into both address spaces
	if (binder_update_page_range(proc, 1, proc->buffer,
				     proc->buffer + PAGE_SIZE, vma)) {
		......
	}
	// Create the buffers list and insert the first free buffer
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	// Asynchronous transactions may use at most half of the mapped space
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;
	......
}
```
A few details worth noting:

- The kernel caps the mapping at 4M, while Android's user space only requests (1M - 8K); in theory the user-space request could be raised to anything up to 4M.
- mmap allocates and maps one page to hold the first binder_buffer; at this point the entire Binder memory is a single free buffer. Mapping this page also acts as a sanity check: memory management is exercised during initialization, and a problem aborts the setup early.
- The binder_buffer structures themselves live in the mmap space, so the payload Binder can actually carry is smaller than the mapped size (see the sketch after this list).
- After mmap completes, the proc->buffers list holds a single node pointing at proc->buffer and spanning the whole mapped space; the proc->free_buffers tree holds that one node, and the proc->allocated_buffers tree is empty.
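To make the bookkeeping concrete, here is a condensed view of binder_buffer and of the binder_buffer_size() helper that the functions below rely on (taken from driver sources of this era, with several fields elided). It shows why the header eats into the payload: a buffer's size is simply the distance from its data field to the next header, or to the end of the mapping for the last buffer.

```c
struct binder_buffer {
	struct list_head entry;   /* all buffers, in address order (proc->buffers)   */
	struct rb_node rb_node;   /* free_buffers (keyed by size) or                 */
				  /* allocated_buffers (keyed by address)            */
	unsigned free:1;
	unsigned async_transaction:1;
	/* ... other flags and bookkeeping elided ... */
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];          /* payload starts right after the header */
};

static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	// The last buffer extends to the end of the mapped area; any other
	// buffer ends where the next buffer's header begins.
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
		struct binder_buffer, entry) - (size_t)buffer->data;
}
```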
Physical memory allocation and virtual address space mapping are both carried out by binder_update_page_range(). The same function also reclaims physical memory; the two cases are distinguished by the allocate parameter.
```c
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;
	......
	// vma is non-NULL only when called from mmap; in all other cases the
	// memory-related data is looked up through proc.
	if (vma)
		mm = NULL;
	else
		// Take the memory descriptor and raise its user count so the
		// mm_struct cannot be released underneath us
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n", proc->pid);
			vma = NULL;
		}
	}

	// allocate is 0 when memory is being reclaimed
	if (allocate == 0)
		goto free_range;
	......
	// Walk the range one physical page at a time
	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		BUG_ON(*page);
		// Allocate a physical page and record it in proc->pages
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		......
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		// Map the kernel page table entry to the physical page
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		......
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		// Insert the physical page into the user virtual address space
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		......
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		// Drop the reference taken on the memory descriptor
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			// Unmap the page from the user virtual address space
			zap_page_range(vma, (uintptr_t)page_addr +
				       proc->user_buffer_offset,
				       PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		// Unmap the page from the kernel page table
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		// Release the physical page
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
	......
}
```
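This double mapping is what gives Binder its one-copy transfer: the sender's data is copied once, into the kernel view of a buffer allocated from the receiver's mapped space, and the receiver is then handed the user-space view of the same pages. A heavily simplified sketch of that step, condensed from the driver's binder_transaction() and binder_thread_read() (types and error paths vary across kernel versions):

```c
// In binder_transaction(): allocate out of the *receiver's* mapped space
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
			     tr->offsets_size,
			     !reply && (t->flags & TF_ONE_WAY));
......
// The single copy: from the sender's user space into the kernel view
// of the receiver's buffer
if (copy_from_user(t->buffer->data,
		   (const void __user *)(uintptr_t)tr->data.ptr.buffer,
		   tr->data_size)) {
	......
}
......
// Later, in binder_thread_read() (running in the receiver, so proc is
// the receiving process): hand out the user-space address of the same
// pages — no second copy is needed.
tr.data.ptr.buffer = (binder_uintptr_t)(
	(uintptr_t)t->buffer->data + proc->user_buffer_offset);
```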
Memory allocation
Binder's memory allocation function is binder_alloc_buf(); its source can be read directly.
```c
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;
	......
	// Total size, with each part aligned to the pointer size
	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	......
	// Search free_buffers for a node of exactly the right size. If the
	// loop ends with n == NULL there is no exact match, and best_fit is
	// the smallest node larger than size.
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	......
	// No exact match in free_buffers: allocate from the best_fit node
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}
	// Start address of the last page of the best-fit node
	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	// If the leftover space is too small to hold another binder_buffer
	// header (plus 4 bytes), do not split the node.
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	// End address (aligned up) of the last page that must be allocated
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	// end_page_addr > has_page_addr means the end of the requested space
	// is on the best-fit node's last page, which is already mapped (it is
	// shared with the next buffer's header), so clamp the end to
	// has_page_addr.
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	// Allocate the physical pages
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;
	// Move the node from free_buffers to allocated_buffers
	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	// If the node was split, insert the remainder into free_buffers
	if (buffer_size != size) {
		struct binder_buffer *new_buffer =
			(void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	......
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	......
	return buffer;
}
```
The heart of allocation is computing which pages to map. If a buffer of exactly the right size is found in the free_buffers tree, it is used directly; otherwise the smallest buffer larger than the requested size is used. Because two consecutive free buffers never appear on the proc->buffers list (they are merged when a buffer is released), the neighbours of the chosen buffer are always allocated, and the boundary pages shared with them must be taken into account: a page already mapped for a neighbouring buffer must not be requested again. Concretely (a toy illustration of the arithmetic follows this list):

- Physical memory is allocated in whole pages.
- There are never two consecutive free buffers on proc->buffers.
- The allocation start address is aligned up to a page boundary: PAGE_ALIGN((uintptr_t)buffer->data).
- If the allocation end address falls on the same page as the start of the next buffer, it is aligned down to that page: ((uintptr_t)buffer->data + buffer_size) & PAGE_MASK.
- If the allocation end address falls on a free page, it is aligned up: PAGE_ALIGN((uintptr_t)buffer->data + buffer_size).
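A small standalone program makes this arithmetic concrete. It is only an illustration: PAGE_SIZE, PAGE_MASK and PAGE_ALIGN are redefined locally for a 4K page, and the addresses are made up.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uintptr_t data = 0x1000A40;   /* hypothetical buffer->data */
	size_t buffer_size = 0x2300;  /* hypothetical buffer_size  */

	/* Allocation start: first page boundary at or above data. */
	printf("alloc start:        0x%lx\n",
	       (unsigned long)PAGE_ALIGN(data));                  /* 0x1001000 */
	/* Allocation end when the last page is shared with the next
	 * buffer's header: align down (has_page_addr). */
	printf("alloc end (shared): 0x%lx\n",
	       (unsigned long)((data + buffer_size) & PAGE_MASK)); /* 0x1002000 */
	/* Allocation end when the last page is free: align up. */
	printf("alloc end (free):   0x%lx\n",
	       (unsigned long)PAGE_ALIGN(data + buffer_size));     /* 0x1003000 */
	/* Release always uses the conservative pair, freeing only whole
	 * pages that lie strictly inside the buffer. */
	printf("free range: [0x%lx, 0x%lx)\n",
	       (unsigned long)PAGE_ALIGN(data),
	       (unsigned long)((data + buffer_size) & PAGE_MASK));
	return 0;
}
```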
(Figure: changes in Binder memory during buffer allocation.)
Memory recycling
Next, take a look at the source of binder_free_buf().
```c
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	// Size of the buffer being released
	buffer_size = binder_buffer_size(proc, buffer);
	// Payload size, with each part aligned to the pointer size
	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));
	......
	// Release the physical pages that lie entirely inside the buffer
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	// Remove the released buffer from the allocated_buffers tree
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	// If the next buffer on proc->buffers is free, merge it into the
	// buffer being released
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	// If the previous buffer on proc->buffers is free, merge the released
	// buffer into it instead
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	// Insert the (possibly merged) buffer into the free_buffers tree
	binder_insert_free_buffer(proc, buffer);
}
```
Reclamation needs the same care with boundary addresses: as with allocation, the released buffer may share its first and last pages with neighbouring buffers, so only pages lying entirely inside it are freed.

- The release start address is aligned up to a page boundary: PAGE_ALIGN((uintptr_t)buffer->data).
- The release end address is aligned down: ((uintptr_t)buffer->data + buffer_size) & PAGE_MASK. (The toy program above prints exactly this range.)

Reclamation must also merge adjacent free buffers. Once the buffers to merge have been identified, binder_delete_free_buffer() removes the now-redundant one.
```c
static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	// The buffer preceding the one being deleted
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	// If the deleted buffer shares its first page with the previous
	// buffer, that page must survive: clear free_page_start.
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		// If the deleted buffer also ends on that same page, the end
		// page must survive as well: clear free_page_end.
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		......
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		// The buffer following the one being deleted
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		// If the deleted buffer shares its last page with the next
		// buffer, that page must survive
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			// If the deleted buffer also starts on that page, the
			// start page must survive as well
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			......
		}
	}
	// Remove the buffer from the proc->buffers list
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		......
		// Release the physical pages, honouring free_page_start and
		// free_page_end
		binder_update_page_range(proc, 0,
			free_page_start ? buffer_start_page(buffer) :
					  buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
					 buffer_start_page(buffer)) + PAGE_SIZE,
			NULL);
	}
}
```
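The buffer_start_page() and buffer_end_page() helpers used above deserve a note: they operate on the address of the binder_buffer header itself, not on its payload, so buffer_end_page() is the page holding the last byte of the header. Condensed from the same era of driver source:

```c
static void *buffer_start_page(struct binder_buffer *buffer)
{
	// Page containing the first byte of the binder_buffer header
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	// Page containing the last byte of the binder_buffer header
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
```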
(Figure: changes to the buffers during memory reclamation.)