-
进程间通信的原理:
Binder驱动的初始化:
/*
 * binder_init() - module init: create the deferred workqueue and the
 * debugfs directories, then register /dev/binder as a misc device.
 * Returns 0 on success or a negative errno.
 */
static int __init binder_init(void){
	int ret;

	/* Single-threaded workqueue used for deferred binder work. */
	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;

	/* debugfs is optional: failure to create the dirs is not fatal. */
	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", binder_debugfs_dir_entry_root);

	ret = misc_register(&binder_miscdev); //见2.1 — registers /dev/binder
	if (ret) {
		/* Fix: don't leak the workqueue and debugfs entries when
		 * registration fails (debugfs_remove_recursive(NULL) is a
		 * no-op, so this is safe even if debugfs creation failed). */
		debugfs_remove_recursive(binder_debugfs_dir_entry_root);
		destroy_workqueue(binder_deferred_workqueue);
		return ret;
	}
	if (binder_debugfs_dir_entry_root) {
		//...省略部分代码 (debugfs file creation omitted in this excerpt)
	}
	return ret;
}
2.1 驱动注册 misc_register(&binder_miscdev):
a. 内核维护一个misc_list链表,misc设备在misc_register注册的时候链接到这个链表,在misc_deregister中解除链接。主要的设备结构就是miscdevice。定义如下:
/*
 * Descriptor for a misc (miscellaneous) character device. A driver
 * declares and fills one of these (normally just minor, name and fops)
 * and passes it to misc_register(), which links it onto the kernel's
 * misc_list; misc_deregister() unlinks it.
 */
struct miscdevice {
int minor; // minor device number; MISC_DYNAMIC_MINOR asks the kernel to assign one
const char *name; // name of the device node under /dev
const struct file_operations *fops; // file-operation callbacks dispatched for this device
struct list_head list; // link into the kernel-wide misc_list
struct device *parent; // parent device, if any
struct device *this_device; // device object backing the node (presumably created at registration — verify)
const char *nodename; // optional override for the /dev node name
mode_t mode; // permission bits for the device node
};
b. 这个结构体是misc设备基本的结构体,在注册misc设备的时候必须要声明并初始化一个这样的结构体,但其中一般只需填充name minor fops字段就可以了。此处的binder的结构体如下:
/*
 * The binder misc device, registered in binder_init(). As noted above,
 * only minor, name and fops need to be filled in: the kernel picks the
 * minor number dynamically and the node appears as /dev/binder.
 */
static struct miscdevice binder_miscdev = {
.minor = MISC_DYNAMIC_MINOR, // let the kernel choose the minor number
.name = "binder", // device node name: /dev/binder
.fops = &binder_fops // syscall handlers, defined below
};
c. 那么这个binder_fops又是什么呢?
/*
 * File operations for /dev/binder. The VFS dispatches each syscall on
 * the device node to the matching handler here: open(2) -> binder_open(),
 * mmap(2) -> binder_mmap(), ioctl(2) -> binder_ioctl(), and so on.
 */
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl, // the compat (32-bit) path reuses the same handler
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
===>定义了binder方法的操作,例如调用了open的方法时,驱动就会去找到binder_open()方法并调用;
- binder_open()方法:
/*
 * binder_open() - called when a process opens /dev/binder.
 * Allocates the per-process binder_proc (see 3.1), records the owning
 * task, publishes the proc on the global binder_procs list, and stashes
 * it in filp->private_data for the later mmap/ioctl calls.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int binder_open(struct inode *nodp, struct file *filp){
	struct binder_proc *proc; //见3.1
	/* Allocate a zero-initialized binder_proc in kernel space.
	 * (Sized to the struct itself — the "no more than 128k" in the
	 * original note refers to the mmap size, not this allocation.) */
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL) return -ENOMEM;
	/* Pin the current task and remember it as the proc's owner. */
	get_task_struct(current);
	proc->tsk = current;
	/* Initialize the todo work list and the wait queue threads block on. */
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	/* Fix: binder_procs is a global list shared by every process that
	 * opens the driver; guard the insertion with the driver lock, as
	 * the real driver does. */
	binder_lock(__func__);
	hlist_add_head(&proc->proc_node, &binder_procs);
	/* Save the proc in the file's private data for mmap/ioctl. */
	filp->private_data = proc;
	binder_unlock(__func__);
	return 0;
}
3.1 binder_proc结构体:
/*
 * Per-process binder state. One instance is created in binder_open()
 * for each process that opens /dev/binder and is stored in
 * filp->private_data.
 */
struct binder_proc {
struct rb_root threads; // red-black tree of this process's binder threads; e.g. servicemanager may handle several service requests concurrently
struct rb_root nodes; // rb-tree of binder objects owned by this process; e.g. a flat_binder_object sent to the driver carries a weak reference to one
struct rb_root refs_by_desc; // rb-tree of references to remote binder objects, keyed by handle; servicemanager keeps binders written to it here
struct rb_root refs_by_node; // the same references, keyed by node address
int pid; // process id of the owner
struct vm_area_struct *vma; // user-space mapping recorded in binder_mmap()
struct mm_struct *vma_vm_mm; // mm_struct of the process that performed the mmap
struct task_struct *tsk; // owning task, pinned in binder_open()
struct files_struct *files; // owner's file table, taken in binder_mmap()
struct hlist_node deferred_work_node;
int deferred_work;
void *buffer; // kernel virtual start address of the binder buffer (area->addr)
struct page **pages; // array of physical-page pointers backing the buffer
size_t buffer_size; // total size of the mapping
uint32_t buffer_free; // free space accounting for the buffer
struct list_head todo; // pending work items for this process
wait_queue_head_t wait; // wait queue the process's threads sleep on
int max_threads; // thread-pool limit, set via BINDER_SET_MAX_THREADS
// ... remaining fields omitted in this excerpt ...
};
- binder_mmap()方法:
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
struct vm_struct *area; //内核空间
//获取binder_proc,binder_open()方法中存入
struct binder_proc *proc = filp->private_data;
const char *failure_string;
struct binder_buffer *buffer;
if (proc->tsk != current)
return -EINVAL;
//普通进程大小为1M - 8k;sm为128k;如果大于4M,也只取4M;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M;
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
mutex_lock(&binder_mmap_lock);
// 内核空间开辟的首地址
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
// 内核空间的首地址
proc->buffer = area->addr;
// 计算用户空间和内核空间的偏移量
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
mutex_unlock(&binder_mmap_lock);
// 按页开辟内存
proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
// 1M-8k
proc->buffer_size = vma->vm_end - vma->vm_start;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
// 开辟映射物理页,一次拷贝的关键在此处;
if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
}
buffer = proc->buffer;
list_add(&buffer->entry, &proc->buffers);
buffer->free = 1;
proc->files = get_files_struct(current);
proc->vma = vma;
proc->vma_vm_mm = vma->vm_mm;
return ret;
}
3.2 binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma):
proc->buffer:首地址(proc->buffer = area->addr)
PAGE_SIZE:一般是4kb;
vma:vm_area_struct 结构体;
/*
 * binder_update_page_range() - allocate (allocate == 1) or free
 * (allocate == 0) the physical pages backing [start, end) of the binder
 * buffer, mapping each page into BOTH the kernel address space
 * (map_vm_area) and the user mapping (vm_insert_page). This double
 * mapping of the same physical page is what enables binder's
 * single-copy IPC.
 * NOTE(review): this excerpt is truncated — the err_alloc_page_failed
 * label it jumps to, the page-freeing body of the free path, and the
 * closing brace are omitted from the quoted source.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
{
void *page_addr;
unsigned long user_page_addr;
struct vm_struct tmp_area;
struct page **page;
struct mm_struct *mm;
// Empty range: nothing to do.
if (end <= start)
return 0;
trace_binder_update_page_range(proc, allocate, start, end);
// With a vma in hand we don't need the mm; otherwise take the owner's mm to look the vma up.
if (vma)
mm = NULL;
else
mm = get_task_mm(proc->tsk);
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
// Refuse a vma that belongs to a different mm than the one recorded at mmap time.
if (vma && mm != proc->vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
proc->pid);
vma = NULL;
}
}
// allocate == 0 means free; the caller here passes 1, so we allocate physical memory.
if (allocate == 0)
goto free_range;
// In this version only one page is allocated up front (binder_mmap passes a PAGE_SIZE range); earlier versions allocated the whole buffer.
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
// Slot in proc->pages corresponding to this page address.
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
BUG_ON(*page);
// Allocate one zeroed physical page.
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (*page == NULL) {
pr_err("%d: binder_alloc_buf failed for page at %p\n",
proc->pid, page_addr);
goto err_alloc_page_failed;
}
tmp_area.addr = page_addr;
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
// Map the freshly allocated page into the kernel address space.
ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
// Derive the user-space address from the offset recorded in binder_mmap().
user_page_addr =(uintptr_t)page_addr + proc->user_buffer_offset;
// Map the SAME physical page into the user mapping.
ret = vm_insert_page(vma, user_page_addr, page[0]);
}
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return 0;
// Free path (allocate == 0): walk the range back to front, unmapping each page from user space.
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
if (vma)
zap_page_range(vma, (uintptr_t)page_addr +
proc->user_buffer_offset, PAGE_SIZE, NULL);
}
// NOTE(review): the original function continues (kernel unmap, page free, error labels, closing brace); omitted from this excerpt.
binder一次拷贝的关键在于:在分配物理内存的同时,就完成了该内存到内核空间和用户空间的映射。也就是说,同一份物理内存,既可以在用户空间用虚拟地址访问,也可以在内核空间用虚拟地址访问。
- binder_ioctl()方法:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
//拿到 binder_proc *proc 结构体
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
trace_binder_ioctl(cmd, arg);
binder_lock(__func__);
// 通过proc得到thread
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
case BINDER_SET_MAX_THREADS:
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
goto err;
break;
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
struct binder_version __user *ver = ubuf;
if (size != sizeof(struct binder_version)) {
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
}
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
binder_unlock(__func__);
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
trace_binder_ioctl_done(ret);
return ret;
}