/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE         = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC       = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Search the rb tree for a buffer
 * that matches the user data pointer.
 *
 * Return: Pointer to the buffer, NULL if no match, or ERR_PTR(-EPERM) if
 * the buffer is not currently freeable by userspace
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

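/*
 * binder_update_page_range() either populates (allocate == 1) or releases
 * (allocate == 0) the physical pages backing the range [start, end) of the
 * mapping. Released pages are not freed immediately: they are parked on
 * binder_alloc_lru so the shrinker can reclaim them later under memory
 * pressure.
 */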
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		if (page_addr == start)
			break;
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		if (page_addr == start)
			break;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures are
	 * set up completely; see the smp_rmb() side in binder_alloc_get_vma().
	 * We also want to guarantee the new alloc->vma_vm_mm is always
	 * visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}

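/*
 * Allocation below is best-fit: walk the size-ordered free_buffers rbtree
 * for the smallest free buffer large enough for the request, populate any
 * missing backing pages, and if the fit is not exact split off the unused
 * tail as a new free buffer.
 */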
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

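/*
 * Illustrative sketch only (a hypothetical caller, not part of this file):
 * users of this API in binder.c are expected to check the result with
 * IS_ERR() and pair the allocation with binder_alloc_free_buf():
 *
 *	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *				      extra_size, is_async);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	...
 *	binder_alloc_free_buf(alloc, buffer);
 */
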
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

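/*
 * binder_free_buf_locked() returns a buffer to the free rbtree, releases the
 * fully-covered backing pages, and merges the buffer with free neighbors in
 * the address-ordered list so contiguous free space stays represented by a
 * single binder_buffer.
 */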
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	alloc->buffer = (void __user *)vma->vm_start;
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
			       ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer = NULL;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru_one that @item belongs to
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!down_read_trylock(&mm->mmap_sem))
		goto err_down_read_mmap_sem_failed;
	vma = binder_alloc_get_vma(alloc);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	up_read(&mm->mmap_sem);
	mmput_async(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_read_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

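/*
 * Shrinker integration: binder_shrink_count() reports how many binder pages
 * are sitting on binder_alloc_lru, and binder_shrink_scan() walks that list,
 * invoking binder_alloc_free_page() for each entry it is asked to reclaim.
 */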
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}

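/*
 * Illustrative sketch only (a hypothetical caller, not part of this file):
 * binder_alloc_shrinker_init() is meant to run once at driver init, while
 * binder_alloc_init() runs per process from binder_open(), e.g.:
 *
 *	ret = binder_alloc_shrinker_init();
 *	if (ret)
 *		return ret;
 *	...
 *	binder_alloc_init(&proc->alloc);
 */
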
/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

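/*
 * binder_alloc_do_buffer_copy() moves @bytes between a kernel pointer and
 * the binder buffer, one page at a time. It can use kmap_atomic() because
 * a memcpy() between kernel memory and an already-resident page cannot
 * fault or sleep, unlike the user copy above, which uses kmap() since
 * copy_from_user() may fault.
 */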
static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
					bool to_buffer,
					struct binder_buffer *buffer,
					binder_size_t buffer_offset,
					void *ptr,
					size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * if this device has VIVT cache arch
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
}

void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 void *src,
				 size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
				    src, bytes);
}

void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				   void *dest,
				   struct binder_buffer *buffer,
				   binder_size_t buffer_offset,
				   size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
				    dest, bytes);
}