binder_alloc.c

/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"
struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

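/*
 * Buffers are kept on alloc->buffers in address order, so the next/prev list
 * entries are the buffers physically adjacent in the mapped region.
 */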
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

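/*
 * Size of the region owned by @buffer: the gap between its user_data start
 * and either the next buffer's start or the end of the whole mapping.
 */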
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

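/* Insert a free buffer into the free_buffers rbtree, which is sorted by size. */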
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

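/*
 * Insert an in-use buffer into the allocated_buffers rbtree, keyed by its
 * userspace data address; a duplicate address is a bug.
 */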
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for a buffer that matches the user
 * data pointer.
 *
 * Return: Pointer to buffer, NULL if no buffer matches @user_ptr, or
 * ERR_PTR(-EPERM) if the buffer may not currently be freed by userspace
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

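/*
 * Allocate or free the physical pages backing the range [start, end) of the
 * binder mapping. On allocate, a page is pulled back from the binder LRU if
 * still present, otherwise freshly allocated and mapped into the user VMA.
 * On free, pages are not released immediately but parked on the LRU so the
 * shrinker can reclaim them under memory pressure.
 */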
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see that alloc->vma is not NULL, the buffer data structures
	 * are set up completely. See the smp_rmb() side in
	 * binder_alloc_get_vma().
	 * We also want to guarantee the new alloc->vma_vm_mm is always visible
	 * if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}

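/*
 * Carve a buffer of the requested total size out of the free_buffers rbtree
 * using best fit. If the chosen free chunk is larger than needed, the
 * remainder is split off as a new free buffer. Backing pages for the newly
 * used range are populated via binder_update_page_range().
 */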
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc: binder_alloc for this proc
 * @data_size: size of user data buffer
 * @offsets_size: user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async: buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

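/*
 * Page containing the start of @buffer, and the page a preceding neighbour
 * would end on (the page of the last byte before @buffer's start). Used to
 * decide whether adjacent buffers share a physical page.
 */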
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}

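/*
 * Unlink and kfree() a free buffer that is being merged away. The page
 * holding its first byte is only released when neither the previous nor the
 * next buffer shares that page and the buffer does not start page-aligned.
 */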
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

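/*
 * Return @buffer to the free state: release its backing pages (except partial
 * pages shared with neighbours), move it from the allocated tree to the free
 * tree, and coalesce it with adjacent free buffers.
 */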
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 * 0 = success
 * -EBUSY = address space already mapped
 * -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	alloc->buffer = (void __user *)vma->vm_start;
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer = NULL;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}

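/*
 * Tear down the binder_alloc state of a dying proc: free any remaining
 * allocated buffers, release every page still present (pulling it off the
 * binder LRU first), and drop the reference on the owning mm. The vma must
 * already have been closed, so no new allocation can race with the teardown.
 */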
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item: item to free
 * @lru: list_lru instance the item belongs to
 * @lock: lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!down_write_trylock(&mm->mmap_sem))
		goto err_down_write_mmap_sem_failed;
	vma = binder_alloc_get_vma(alloc);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	up_write(&mm->mmap_sem);
	mmput(mm);	/* balances the mmget_not_zero() above */

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);	/* can't sleep here: the LRU lock is still held */
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

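/*
 * Shrinker hooks: the count callback reports how many binder pages are
 * parked on the LRU, and the scan callback walks the LRU freeing pages via
 * binder_alloc_free_page().
 */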
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);

	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *    (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *    (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

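/*
 * Copy @bytes between a kernel buffer and a binder buffer one page at a
 * time, mapping each target page with kmap_atomic(). @to_buffer selects the
 * direction: true copies @ptr into the binder buffer, false copies out of it.
 */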
static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
					bool to_buffer,
					struct binder_buffer *buffer,
					binder_size_t buffer_offset,
					void *ptr,
					size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * if this device has VIVT cache arch
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
}

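/* Copy @bytes from the kernel buffer @src into @buffer at @buffer_offset. */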
void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 void *src,
				 size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
				    src, bytes);
}

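/* Copy @bytes from @buffer at @buffer_offset into the kernel buffer @dest. */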
void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				   void *dest,
				   struct binder_buffer *buffer,
				   binder_size_t buffer_offset,
				   size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
				    dest, bytes);
}