binder_alloc.c

/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR         = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE         = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC       = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = 0;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct kmem_cache *binder_buffer_pool;

int binder_buffer_pool_create(void)
{
	binder_buffer_pool = KMEM_CACHE(binder_buffer, SLAB_HWCACHE_ALIGN);
	if (!binder_buffer_pool)
		return -ENOMEM;
	return 0;
}

void binder_buffer_pool_destroy(void)
{
	kmem_cache_destroy(binder_buffer_pool);
}

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
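
/*
 * Free buffers are kept in an rbtree sorted by size so the allocator can
 * do a best-fit search; allocated buffers are kept in a second rbtree
 * sorted by userspace address so they can be found quickly on free.
 */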
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by the kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate the userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Searches the rb tree for a buffer
 * that matches the user data pointer.
 *
 * Return: Pointer to the buffer, NULL if not found, or an ERR_PTR if the
 * buffer may not be freed by the user
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
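
/*
 * Allocate the physical pages backing [start, end) and insert them into the
 * task's binder vma, or (when allocate == 0) park them on the global
 * binder_alloc_lru so the shrinker can unmap and reclaim them later under
 * memory pressure.
 */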
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	/* Same as mmget_not_zero() in later kernel versions */
	if (need_mm && atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		if (page_addr == start)
			break;
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		if (page_addr == start)
			break;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, buffer data structures set up
	 * completely. Look at smp_rmb side binder_alloc_get_vma.
	 * We also want to guarantee new alloc->vma_vm_mm is always visible
	 * if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
	/*
	 * Find the amount and size of buffers allocated by the current caller;
	 * The idea is that once we cross the threshold, whoever is responsible
	 * for the low async space is likely to try to send another async txn,
	 * and at some point we'll catch them in the act. This is more efficient
	 * than keeping a map per pid.
	 */
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	size_t num_buffers = 0;

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
			+ sizeof(struct binder_buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
			      alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}
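
/*
 * Best-fit allocation: walk the size-sorted rbtree of free buffers for the
 * smallest one that fits the request, make sure its pages are resident, and
 * split off the unused tail as a new free buffer when the match is not exact.
 */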
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async,
				int pid)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kmem_cache_zalloc(binder_buffer_pool, GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = pid;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
		if (alloc->free_async_space < alloc->buffer_size / 10) {
			/*
			 * Start detecting spammers once we have less than 20%
			 * of async space left (which is less than 10% of total
			 * buffer size).
			 */
			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
		} else {
			alloc->oneway_spam_detected = false;
		}
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc: binder_alloc for this proc
 * @data_size: size of user data buffer
 * @offsets_size: user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
 * @is_async: buffer for async transaction
 * @pid: pid to attribute allocation to (used for debugging)
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) if an error occurred
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async,
					   int pid)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async, pid);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}
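
/*
 * Drop a free buffer's metadata. The page containing the buffer's start is
 * only released if the buffer neither shares that page with its neighbours
 * nor starts exactly on a page boundary; the buffer struct itself is always
 * freed.
 */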
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kmem_cache_free(binder_buffer_pool, buffer);
}
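
/*
 * Return a buffer to the free tree: release the pages it fully covers, then
 * coalesce with a free next and/or previous neighbour so adjacent free space
 * is tracked as a single buffer.
 */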
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			  buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer);

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_alloc_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 * 0 = success
 * -EBUSY = address space already mapped
 * -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = (void __user *)vma->vm_start;

	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	buffer = kmem_cache_zalloc(binder_buffer_pool, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	/* Same as mmgrab() in later kernel versions */
	atomic_inc(&alloc->vma_vm_mm->mm_count);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = NULL;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}
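
/*
 * Tear down the allocator when the binder proc goes away: free any remaining
 * buffers, release every resident page, and drop the reference on the owning
 * mm. The vma must already have been closed by this point.
 */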
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kmem_cache_free(binder_buffer_pool, buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK %s\n",
				     __func__, alloc->pid, i, page_addr,
				     on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item: item to free
 * @lock: lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	/* Same as mmget_not_zero() in later kernel versions */
	if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
		goto err_mmget;
	if (!down_read_trylock(&mm->mmap_sem))
		goto err_down_read_mmap_sem_failed;
	vma = binder_alloc_get_vma(alloc);

	list_del_init(item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);
	}
	up_read(&mm->mmap_sem);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_read_mmap_sem_failed:
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
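
/* Shrinker hooks: count and reclaim the pages cached on binder_alloc_lru. */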
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *    (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *    (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		memset(kptr, 0, size);
		kunmap(page);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}
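
/*
 * Copy between a kernel buffer and a binder buffer one page at a time, using
 * short-lived atomic mappings of the backing pages.
 */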
static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
					bool to_buffer,
					struct binder_buffer *buffer,
					binder_size_t buffer_offset,
					void *ptr,
					size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * if this device has VIVT cache arch
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
}

void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 void *src,
				 size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
				    src, bytes);
}

void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				   void *dest,
				   struct binder_buffer *buffer,
				   binder_size_t buffer_offset,
				   size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
				    dest, bytes);
}