
Android Framework Internals -- Binder Driver Source Code Analysis - 4.2 binder_mmap


We briefly covered how mmap works earlier; now let's see how the Binder driver implements it internally in binder_mmap.

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
   int ret;
   // kernel-space virtual area
   struct vm_struct *area;
   // Binder state of the calling process
   struct binder_proc *proc = filp->private_data;
   const char *failure_string;
   struct binder_buffer *buffer;

   if (proc->tsk != current)
      return -EINVAL;
   //①
   if ((vma->vm_end - vma->vm_start) > SZ_4M)
      vma->vm_end = vma->vm_start + SZ_4M;

   mutex_lock(&binder_mmap_lock);
   if (proc->buffer) {
      ret = -EBUSY;
      failure_string = "already mapped";
      goto err_already_mapped;
   }
   ......
   //②
   area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
   if (area == NULL) {
      ret = -ENOMEM;
      failure_string = "get_vm_area";
      goto err_get_vm_area_failed;
   }
   proc->buffer = area->addr;
   proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
   mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
   if (cache_is_vipt_aliasing()) {
      while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
         pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
         vma->vm_start += PAGE_SIZE;
      }
   }
#endif
   proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
   if (proc->pages == NULL) {
      ret = -ENOMEM;
      failure_string = "alloc page array";
      goto err_alloc_pages_failed;
   }
   proc->buffer_size = vma->vm_end - vma->vm_start;

   vma->vm_ops = &binder_vm_ops;
   vma->vm_private_data = proc;
   //③
   if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
      ret = -ENOMEM;
      failure_string = "alloc small buf";
      goto err_alloc_small_buf_failed;
   }
   buffer = proc->buffer;
   INIT_LIST_HEAD(&proc->buffers);
   list_add(&buffer->entry, &proc->buffers);
   buffer->free = 1;
   binder_insert_free_buffer(proc, buffer);
   proc->free_async_space = proc->buffer_size / 2;
   barrier();
   proc->files = get_files_struct(current);
   proc->vma = vma;
   proc->vma_vm_mm = vma->vm_mm;

   /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
       proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
   return 0;

err_alloc_small_buf_failed:
   kfree(proc->pages);
   proc->pages = NULL;
err_alloc_pages_failed:
   mutex_lock(&binder_mmap_lock);
   vfree(proc->buffer);
   proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
   mutex_unlock(&binder_mmap_lock);
err_bad_arg:
   pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
          proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
   return ret;
}

Let's first look at binder_mmap's two parameters, which are passed in from service_manager's side. We focus on the second one, vma, which we can regard as describing the user-space region; inside binder_mmap, an area (a struct vm_struct) is then created, which describes the corresponding kernel-space region.

①: First, the requested user-space size is checked and capped at 4 MB (SZ_4M). Looking back, when service_manager called binder_open it passed a mapsize of 128 * 1024, i.e. 128 KB, so the region mapped here, and mirrored into kernel space, is 128 KB.
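
For reference, here is a simplified sketch of the user-space side (abridged from service_manager's binder.c in AOSP; error handling omitted), showing where that 128 KB comes from and which mmap call ends up in binder_mmap:

struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));

    bs->fd = open("/dev/binder", O_RDWR);   // handled by binder_open() in the driver
    bs->mapsize = mapsize;                  // 128 * 1024 for service_manager
    // this mmap call is what the driver's binder_mmap() above services
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    return bs;
}

// In service_manager's main():
//     bs = binder_open(128 * 1024);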

②: get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP) reserves a contiguous range of kernel virtual address space, exactly as large as the user-space region passed in. Its start address is stored in proc->buffer, and the fixed offset between the user-space and kernel-space addresses is recorded in proc->user_buffer_offset.
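
The effect of user_buffer_offset is a fixed translation between the two views of the same buffer. The hypothetical helpers below are not in the driver itself (it inlines this arithmetic wherever needed); they only illustrate the relationship:

/* Hypothetical helpers, for illustration only. */
static void *binder_kernel_to_user(struct binder_proc *proc, void *kaddr)
{
   /* same physical page, seen from the user-space mapping */
   return (void *)((uintptr_t)kaddr + proc->user_buffer_offset);
}

static void *binder_user_to_kernel(struct binder_proc *proc, void *uaddr)
{
   return (void *)((uintptr_t)uaddr - proc->user_buffer_offset);
}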

③: binder_update_page_range is called; its main job is to allocate physical memory and establish the mappings. Let's look at the source:

static int binder_update_page_range(struct binder_proc *proc, int allocate,
                void *start, void *end,
                struct vm_area_struct *vma)
{
   void *page_addr;
   unsigned long user_page_addr;
   struct vm_struct tmp_area;
   struct page **page;
   struct mm_struct *mm;
   //......

   if (allocate == 0)
      goto free_range;

   if (vma == NULL) {
      pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
         proc->pid);
      goto err_no_vma;
   }

   for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
      int ret;

      page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

      BUG_ON(*page);
      // allocate one page (4 KB) of physical memory
      *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
      if (*page == NULL) {
         pr_err("%d: binder_alloc_buf failed for page at %p\n",
            proc->pid, page_addr);
         goto err_alloc_page_failed;
      }
      tmp_area.addr = page_addr;
      tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
      // map the kernel virtual address onto this page
      ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
      if (ret) {
         pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                proc->pid, page_addr);
         goto err_map_kernel_failed;
      }
      user_page_addr =
         (uintptr_t)page_addr + proc->user_buffer_offset;
      // map the user-space virtual address onto the same page
      ret = vm_insert_page(vma, user_page_addr, page[0]);
      if (ret) {
         pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                proc->pid, user_page_addr);
         goto err_vm_insert_page_failed;
      }
      /* vm_insert_page does not seem to increment the refcount */
   }
   if (mm) {
      up_write(&mm->mmap_sem);
      mmput(mm);
   }
   return 0;
   ......
}

What we see here: first a single physical page (4 KB) is allocated; map_vm_area then maps the kernel virtual address onto that page; and vm_insert_page maps the user-space address onto the same page, as shown in the figure below:

[Figure: one physical page mapped simultaneously into the kernel-space and user-space virtual address ranges]

With that, the physical memory is mapped into both user space and kernel space, and binder_mmap has done its job.
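
This double mapping is what lets a Binder transaction get by with a single copy: the driver copies the sender's data once into the kernel view of the buffer, and the receiving process can read it directly through its own user-space view. A conceptual sketch under that assumption, not literal driver code:

/* Conceptual sketch: one copy_from_user() into the kernel view is enough,
 * because the receiver sees the same physical pages at
 * kaddr + proc->user_buffer_offset. */
void *kaddr = buffer_data;                                   /* kernel view */
void *uaddr = (void *)((uintptr_t)kaddr + proc->user_buffer_offset);

copy_from_user(kaddr, sender_ptr, size);  /* the only copy in the transfer */
/* the receiver is handed uaddr and reads the data with no further copy */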

Next, back in service_manager's main method: after calling binder_open, it calls binder_loop, which is somewhat like Android's Handler in that it starts a loop, receives commands, and executes the corresponding work.
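
As a preview, here is binder_loop abridged from service_manager's binder.c (error handling trimmed): it tells the driver it is entering the loop, then blocks in ioctl(BINDER_WRITE_READ) waiting for commands and hands each batch to binder_parse:

void binder_loop(struct binder_state *bs, binder_handler func)
{
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;                 // tell the driver we are looping
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t)readbuf;

        // blocks in the driver until a command/transaction arrives
        ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        // dispatch each received command, invoking func for transactions
        binder_parse(bs, 0, (uintptr_t)readbuf, bwr.read_consumed, func);
    }
}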
