Parcel.cpp 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145
  1. /*
  2. * Copyright (C) 2005 The Android Open Source Project
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #define LOG_TAG "Parcel"
  17. //#define LOG_NDEBUG 0
  18. #include <binder/Parcel.h>
  19. #include <fcntl.h>
  20. #include <pthread.h>
  21. #include <binder/IPCThreadState.h>
  22. #include <binder/Binder.h>
  23. #include <binder/BpBinder.h>
  24. #include <binder/ProcessState.h>
  25. #include <binder/TextOutput.h>
  26. #include <errno.h>
  27. #include <utils/Debug.h>
  28. #include <utils/Log.h>
  29. #include <utils/String8.h>
  30. #include <utils/String16.h>
  31. #include <utils/misc.h>
  32. #include <utils/Flattenable.h>
  33. #include <cutils/ashmem.h>
  34. #include <private/binder/binder_module.h>
  35. #include <private/binder/Static.h>
  36. #include <inttypes.h>
  37. #include <stdio.h>
  38. #include <stdlib.h>
  39. #include <stdint.h>
  40. #include <sys/mman.h>
  41. #include <sys/stat.h>
  42. #include <sys/types.h>
  43. #include <unistd.h>
  44. #ifndef INT32_MAX
  45. #define INT32_MAX ((int32_t)(2147483647))
  46. #endif
  47. #define LOG_REFS(...)
  48. //#define LOG_REFS(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)
  49. #define LOG_ALLOC(...)
  50. //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)
  51. // ---------------------------------------------------------------------------
  52. // This macro should never be used at runtime, as a too large value
  53. // of s could cause an integer overflow. Instead, you should always
  54. // use the wrapper function pad_size()
  55. #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
  56. static size_t pad_size(size_t s) {
  57. if (s > (SIZE_T_MAX - 3)) {
  58. abort();
  59. }
  60. return PAD_SIZE_UNSAFE(s);
  61. }
  62. // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
  63. #define STRICT_MODE_PENALTY_GATHER (0x40 << 16)
  64. // Note: must be kept in sync with android/os/Parcel.java's EX_HAS_REPLY_HEADER
  65. #define EX_HAS_REPLY_HEADER -128
  66. // XXX This can be made public if we want to provide
  67. // support for typed data.
// Minimal flattened-object header: a type tag plus one 32-bit payload.
// Kept private for now; could be exposed if typed data support is added.
struct small_flat_data
{
    uint32_t type;  // object type tag
    uint32_t data;  // type-specific 32-bit payload
};
  73. namespace android {
// Process-wide accounting of all live Parcel allocations, guarded by
// gParcelGlobalAllocSizeLock (read via getGlobalAllocSize/Count below).
static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
static size_t gParcelGlobalAllocSize = 0;   // total bytes currently allocated by Parcels
static size_t gParcelGlobalAllocCount = 0;  // number of live Parcel buffers
// Maximum size of a blob to transfer in-place.
static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
// How a blob's payload travels: copied inline, or via a shared ashmem
// region that is either read-only or writable for the receiver.
enum {
    BLOB_INPLACE = 0,
    BLOB_ASHMEM_IMMUTABLE = 1,
    BLOB_ASHMEM_MUTABLE = 2,
};
  84. static dev_t ashmem_rdev()
  85. {
  86. static dev_t __ashmem_rdev;
  87. static pthread_mutex_t __ashmem_rdev_lock = PTHREAD_MUTEX_INITIALIZER;
  88. pthread_mutex_lock(&__ashmem_rdev_lock);
  89. dev_t rdev = __ashmem_rdev;
  90. if (!rdev) {
  91. int fd = TEMP_FAILURE_RETRY(open("/dev/ashmem", O_RDONLY));
  92. if (fd >= 0) {
  93. struct stat st;
  94. int ret = TEMP_FAILURE_RETRY(fstat(fd, &st));
  95. close(fd);
  96. if ((ret >= 0) && S_ISCHR(st.st_mode)) {
  97. rdev = __ashmem_rdev = st.st_rdev;
  98. }
  99. }
  100. }
  101. pthread_mutex_unlock(&__ashmem_rdev_lock);
  102. return rdev;
  103. }
// Take the reference (or record the resource) that a flattened object in
// a Parcel represents, so the Parcel keeps it alive while it holds the
// bytes.  'who' is an opaque owner cookie used only for refcount
// debugging.  For owned ashmem fds, *outAshmemSize (if non-NULL) is
// increased by the region size so the Parcel can report ashmem usage.
// Mirrored by release_object() below.
void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            // Local strong binder: cookie holds the IBinder*.
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            // Local weak binder: binder holds the weakref_type*.
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            // Remote strong binder: resolve the kernel handle to a proxy.
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            // Remote weak binder.
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->incWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            // cookie != 0 marks an fd this Parcel owns (will close on release).
            if ((obj.cookie != 0) && (outAshmemSize != NULL)) {
                struct stat st;
                int ret = fstat(obj.handle, &st);
                if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) {
                    // If we own an ashmem fd, keep track of how much memory it refers to.
                    int size = ashmem_get_size_region(obj.handle);
                    if (size > 0) {
                        *outAshmemSize += size;
                    }
                }
            }
            return;
        }
    }
    // Unknown type tag: log and ignore rather than crash.
    ALOGD("Invalid object type 0x%08x", obj.type);
}
// Convenience overload: acquire without tracking ashmem usage.
void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    acquire_object(proc, obj, who, NULL);
}
  153. static void release_object(const sp<ProcessState>& proc,
  154. const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
  155. {
  156. switch (obj.type) {
  157. case BINDER_TYPE_BINDER:
  158. if (obj.binder) {
  159. LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
  160. reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
  161. }
  162. return;
  163. case BINDER_TYPE_WEAK_BINDER:
  164. if (obj.binder)
  165. reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
  166. return;
  167. case BINDER_TYPE_HANDLE: {
  168. const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
  169. if (b != NULL) {
  170. LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
  171. b->decStrong(who);
  172. }
  173. return;
  174. }
  175. case BINDER_TYPE_WEAK_HANDLE: {
  176. const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
  177. if (b != NULL) b.get_refs()->decWeak(who);
  178. return;
  179. }
  180. case BINDER_TYPE_FD: {
  181. if (obj.cookie != 0) { // owned
  182. if (outAshmemSize != NULL) {
  183. struct stat st;
  184. int ret = fstat(obj.handle, &st);
  185. if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) {
  186. int size = ashmem_get_size_region(obj.handle);
  187. if (size > 0) {
  188. *outAshmemSize -= size;
  189. }
  190. }
  191. }
  192. close(obj.handle);
  193. #ifdef DISABLE_ASHMEM_TRACKING
  194. } else if (obj.cookie != 0) {
  195. close(obj.handle);
  196. #endif
  197. }
  198. return;
  199. }
  200. }
  201. ALOGE("Invalid object type 0x%08x", obj.type);
  202. }
// Convenience overload: release without tracking ashmem usage.
void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    release_object(proc, obj, who, NULL);
}
// Final step of flattening: append the filled-in flat_binder_object to
// the parcel's object list (nullMetaData = false).
inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}
// Flatten a strong IBinder reference into a flat_binder_object and
// append it to 'out'.  Local binders are encoded as BINDER_TYPE_BINDER
// (raw pointers the kernel translates); remote ones as BINDER_TYPE_HANDLE.
// NOTE: binder/handle overlay each other in the kernel struct, so the
// assignment order below (binder zeroed before handle is set) matters.
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    // 0x7f: accepted scheduling-priority mask — presumably the minimum
    // priority the receiving thread may run at; TODO confirm against
    // kernel binder docs.  ACCEPTS_FDS lets transactions carry fds.
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            // Remote binder: encode its handle.
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Local binder: pass its weakref table and object pointer.
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        // NULL binder: encoded as a BINDER_TYPE_BINDER with zero pointers.
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }
    return finish_flatten_binder(binder, obj, out);
}
// Flatten a weak IBinder reference.  The weak ref must be promoted to a
// strong one temporarily so the binder can be probed for local/remote;
// if promotion fails we can only emit a null object.
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const wp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    // Same flags as the strong-reference case above.
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        sp<IBinder> real = binder.promote();
        if (real != NULL) {
            IBinder *local = real->localBinder();
            if (!local) {
                // Remote binder: encode a weak handle.
                BpBinder *proxy = real->remoteBinder();
                if (proxy == NULL) {
                    ALOGE("null proxy");
                }
                const int32_t handle = proxy ? proxy->handle() : 0;
                obj.type = BINDER_TYPE_WEAK_HANDLE;
                obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
                obj.handle = handle;
                obj.cookie = 0;
            } else {
                // Local binder: weakrefs + unsafe raw pointer (no strong ref taken).
                obj.type = BINDER_TYPE_WEAK_BINDER;
                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
            }
            return finish_flatten_binder(real, obj, out);
        }
        // XXX How to deal? In order to flatten the given binder,
        // we need to probe it for information, which requires a primary
        // reference... but we don't have one.
        //
        // The OpenBinder implementation uses a dynamic_cast<> here,
        // but we can't do that with the different reference counting
        // implementation we are using.
        ALOGE("Unable to unflatten Binder weak reference!");
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);
    } else {
        // NULL weak reference: emit a null binder object.
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);
    }
}
// Final step of unflattening.  Currently a no-op hook kept for symmetry
// with finish_flatten_binder(); always succeeds.
inline static status_t finish_unflatten_binder(
    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
    const Parcel& /*in*/)
{
    return NO_ERROR;
}
// Read the next object from 'in' and reconstruct a strong IBinder.
// Only BINDER_TYPE_BINDER (local pointer round-trip) and
// BINDER_TYPE_HANDLE (proxy lookup) are accepted; anything else,
// including a missing object, yields BAD_TYPE.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                // Local object: cookie is the IBinder* in this process.
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                // Remote object: resolve the kernel handle to a proxy.
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
// Read the next object from 'in' and reconstruct a weak IBinder.
// Accepts strong and weak encodings of both local and remote binders;
// anything else yields BAD_TYPE.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, wp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                // Local strong object: take a weak ref on the IBinder*.
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_WEAK_BINDER:
                if (flat->binder != 0) {
                    // Rebuild the weak ref from the raw object + weakref pointers.
                    out->set_object_and_refs(
                        reinterpret_cast<IBinder*>(flat->cookie),
                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
                } else {
                    *out = NULL;
                }
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE:
                // Remote object: weak proxy lookup by handle.
                *out = proc->getWeakProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
  337. // ---------------------------------------------------------------------------
// Construct an empty Parcel; all real initialization is in initState().
Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}
// Release the data buffer and any acquired objects without
// reinitializing state (we are being destroyed).
Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}
  348. size_t Parcel::getGlobalAllocSize() {
  349. pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
  350. size_t size = gParcelGlobalAllocSize;
  351. pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
  352. return size;
  353. }
  354. size_t Parcel::getGlobalAllocCount() {
  355. pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
  356. size_t count = gParcelGlobalAllocCount;
  357. pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
  358. return count;
  359. }
// Raw pointer to the start of the parcel's data buffer (may be NULL if
// nothing has been written).
const uint8_t* Parcel::data() const
{
    return mData;
}
  364. size_t Parcel::dataSize() const
  365. {
  366. return (mDataSize > mDataPos ? mDataSize : mDataPos);
  367. }
// Number of bytes remaining to be read (size minus read cursor).
size_t Parcel::dataAvail() const
{
    // TODO: decide what to do about the possibility that this can
    // report an available-data size that exceeds a Java int's max
    // positive value, causing havoc. Fortunately this will only
    // happen if someone constructs a Parcel containing more than two
    // gigabytes of data, which on typical phone hardware is simply
    // not possible.
    return dataSize() - dataPosition();
}
// Current read/write cursor offset into the data buffer.
size_t Parcel::dataPosition() const
{
    return mDataPos;
}
// Allocated capacity of the data buffer (>= dataSize()).
size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}
  386. status_t Parcel::setDataSize(size_t size)
  387. {
  388. if (size > INT32_MAX) {
  389. // don't accept size_t values which may have come from an
  390. // inadvertent conversion from a negative int.
  391. return BAD_VALUE;
  392. }
  393. status_t err;
  394. err = continueWrite(size);
  395. if (err == NO_ERROR) {
  396. mDataSize = size;
  397. ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
  398. }
  399. return err;
  400. }
// Move the read/write cursor.  Aborts (rather than returning an error —
// the method is void and const) on positions that look like negative
// ints converted to size_t.  Also invalidates the cached object-lookup
// hints since reads may now revisit earlier objects.
void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        abort();
    }
    mDataPos = pos;
    mNextObjectHint = 0;
    mObjectsSorted = false;
}
  412. status_t Parcel::setDataCapacity(size_t size)
  413. {
  414. if (size > INT32_MAX) {
  415. // don't accept size_t values which may have come from an
  416. // inadvertent conversion from a negative int.
  417. return BAD_VALUE;
  418. }
  419. if (size > mDataCapacity) return continueWrite(size);
  420. return NO_ERROR;
  421. }
  422. status_t Parcel::setData(const uint8_t* buffer, size_t len)
  423. {
  424. if (len > INT32_MAX) {
  425. // don't accept size_t values which may have come from an
  426. // inadvertent conversion from a negative int.
  427. return BAD_VALUE;
  428. }
  429. status_t err = restartWrite(len);
  430. if (err == NO_ERROR) {
  431. memcpy(const_cast<uint8_t*>(data()), buffer, len);
  432. mDataSize = len;
  433. mFdsKnown = false;
  434. }
  435. return err;
  436. }
// Append [offset, offset+len) of another parcel's data to this one,
// copying the bytes and re-acquiring any flattened objects that fall
// entirely within the range.  File descriptors are dup()'d so this
// parcel owns its own copies.  Returns BAD_VALUE on out-of-range or
// oversized requests, NO_MEMORY on allocation failure, FDS_NOT_ALLOWED
// if an fd is copied while fds are disallowed.
status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
{
    const sp<ProcessState> proc(ProcessState::self());
    status_t err;
    const uint8_t *data = parcel->mData;
    const binder_size_t *objects = parcel->mObjects;
    size_t size = parcel->mObjectsSize;
    // Where the copied bytes will land in this parcel; used to rebase
    // object offsets below.
    int startPos = mDataPos;
    int firstIndex = -1, lastIndex = -2;  // empty range by default
    if (len == 0) {
        return NO_ERROR;
    }
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }
    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
        || (len > parcel->mDataSize)
        || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }
    // Count objects in range
    for (int i = 0; i < (int) size; i++) {
        size_t off = objects[i];
        // Only objects fully contained in [offset, offset+len) are copied.
        if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
            if (firstIndex == -1) {
                firstIndex = i;
            }
            lastIndex = i;
        }
    }
    int numObjects = lastIndex - firstIndex + 1;
    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }
    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;
    err = NO_ERROR;
    if (numObjects > 0) {
        // grow objects
        if (mObjectsCapacity < mObjectsSize + numObjects) {
            // Grow by 3/2 with an overflow guard on the byte count.
            size_t newSize = ((mObjectsSize + numObjects)*3)/2;
            if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY; // overflow
            binder_size_t *objects =
                (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
            if (objects == (binder_size_t*)0) {
                return NO_MEMORY;
            }
            mObjects = objects;
            mObjectsCapacity = newSize;
        }
        // append and acquire objects
        int idx = mObjectsSize;
        for (int i = firstIndex; i <= lastIndex; i++) {
            // Rebase the object's offset from the source range to where
            // the bytes now live in this parcel.
            size_t off = objects[i] - offset + startPos;
            mObjects[idx++] = off;
            mObjectsSize++;
            flat_binder_object* flat
                = reinterpret_cast<flat_binder_object*>(mData + off);
#ifndef DISABLE_ASHMEM_TRACKING
            acquire_object(proc, *flat, this, &mOpenAshmemSize);
#else
            acquire_object(proc, *flat, this);
#endif
            if (flat->type == BINDER_TYPE_FD) {
                // If this is a file descriptor, we need to dup it so the
                // new Parcel now owns its own fd, and can declare that we
                // officially know we have fds.
                flat->handle = dup(flat->handle);
                flat->cookie = 1;
                mHasFds = mFdsKnown = true;
                if (!mAllowFds) {
                    // Still finish the copy, but report the violation.
                    err = FDS_NOT_ALLOWED;
                }
            }
        }
    }
    return err;
}
// Whether this parcel currently accepts file descriptors being written.
bool Parcel::allowFds() const
{
    return mAllowFds;
}
  528. bool Parcel::pushAllowFds(bool allowFds)
  529. {
  530. const bool origValue = mAllowFds;
  531. if (!allowFds) {
  532. mAllowFds = false;
  533. }
  534. return origValue;
  535. }
// Restore the fd policy previously returned by pushAllowFds().
void Parcel::restoreAllowFds(bool lastValue)
{
    mAllowFds = lastValue;
}
// Whether the parcel contains any file descriptors, scanning the object
// table lazily the first time it is asked after the answer went stale.
bool Parcel::hasFileDescriptors() const
{
    if (!mFdsKnown) {
        scanForFds();
    }
    return mHasFds;
}
// Write RPC headers. (previously just the interface token)
// The header is the caller's StrictMode policy (with PENALTY_GATHER set
// so violations are gathered and shipped back) followed by the
// interface name; enforceInterface() on the far side consumes both.
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    writeInt32(IPCThreadState::self()->getStrictModePolicy() |
        STRICT_MODE_PENALTY_GATHER);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);
}
// Convenience wrapper: enforce against the binder's own descriptor.
bool Parcel::checkInterface(IBinder* binder) const
{
    return enforceInterface(binder->getInterfaceDescriptor());
}
// Consume the RPC header written by writeInterfaceToken(): apply the
// caller's StrictMode policy to this thread, then verify the interface
// name matches.  Returns false (with a warning) on mismatch.
bool Parcel::enforceInterface(const String16& interface,
    IPCThreadState* threadState) const
{
    int32_t strictPolicy = readInt32();
    if (threadState == NULL) {
        threadState = IPCThreadState::self();
    }
    if ((threadState->getLastTransactionBinderFlags() &
        IBinder::FLAG_ONEWAY) != 0) {
        // For one-way calls, the callee is running entirely
        // disconnected from the caller, so disable StrictMode entirely.
        // Not only does disk/network usage not impact the caller, but
        // there's no way to communicate back any violations anyway.
        threadState->setStrictModePolicy(0);
    } else {
        threadState->setStrictModePolicy(strictPolicy);
    }
    const String16 str(readString16());
    if (str == interface) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
            String8(interface).string(), String8(str).string());
        return false;
    }
}
// Offsets (into data()) of the flattened objects held by this parcel.
const binder_size_t* Parcel::objects() const
{
    return mObjects;
}
// Number of entries in objects().
size_t Parcel::objectsCount() const
{
    return mObjectsSize;
}
// Sticky error state set by failed read/write operations.
status_t Parcel::errorCheck() const
{
    return mError;
}
// Force the sticky error state (used by callers to poison the parcel).
void Parcel::setError(status_t err)
{
    mError = err;
}
  601. status_t Parcel::finishWrite(size_t len)
  602. {
  603. if (len > INT32_MAX) {
  604. // don't accept size_t values which may have come from an
  605. // inadvertent conversion from a negative int.
  606. return BAD_VALUE;
  607. }
  608. //printf("Finish write of %d\n", len);
  609. mDataPos += len;
  610. ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
  611. if (mDataPos > mDataSize) {
  612. mDataSize = mDataPos;
  613. ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
  614. }
  615. //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
  616. return NO_ERROR;
  617. }
  618. status_t Parcel::writeUnpadded(const void* data, size_t len)
  619. {
  620. if (len > INT32_MAX) {
  621. // don't accept size_t values which may have come from an
  622. // inadvertent conversion from a negative int.
  623. return BAD_VALUE;
  624. }
  625. size_t end = mDataPos + len;
  626. if (end < mDataPos) {
  627. // integer overflow
  628. return BAD_VALUE;
  629. }
  630. if (end <= mDataCapacity) {
  631. restart_write:
  632. memcpy(mData+mDataPos, data, len);
  633. return finishWrite(len);
  634. }
  635. status_t err = growData(len);
  636. if (err == NO_ERROR) goto restart_write;
  637. return err;
  638. }
  639. status_t Parcel::write(const void* data, size_t len)
  640. {
  641. if (len > INT32_MAX) {
  642. // don't accept size_t values which may have come from an
  643. // inadvertent conversion from a negative int.
  644. return BAD_VALUE;
  645. }
  646. void* const d = writeInplace(len);
  647. if (d) {
  648. memcpy(d, data, len);
  649. return NO_ERROR;
  650. }
  651. return mError;
  652. }
// Reserve 'len' bytes (padded to 4) at the current position and return
// a pointer the caller may fill in directly, or NULL on failure (too
// large, overflow, or growData error — in the last case mError is set
// by growData).  Any pad bytes are zeroed via the mask below so stale
// buffer contents never leak to the receiver.
void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return NULL;
    }
    const size_t padded = pad_size(len);
    // sanity check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return NULL;
    }
    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        uint8_t* const data = mData+mDataPos;
        // Need to pad at end?
        if (padded != len) {
            // Masks keep the written bytes and zero the 1-3 pad bytes of
            // the final word; chosen per host byte order.
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }
        finishWrite(padded);
        return data;
    }
    // Not enough capacity: grow, then retry the (now guaranteed to fit)
    // write above.
    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return NULL;
}
// Append a 32-bit signed integer (4-byte aligned).
status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}
// Append a 32-bit unsigned integer (4-byte aligned).
status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}
  700. status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
  701. if (len > INT32_MAX) {
  702. // don't accept size_t values which may have come from an
  703. // inadvertent conversion from a negative int.
  704. return BAD_VALUE;
  705. }
  706. if (!val) {
  707. return writeInt32(-1);
  708. }
  709. status_t ret = writeInt32(static_cast<uint32_t>(len));
  710. if (ret == NO_ERROR) {
  711. ret = write(val, len * sizeof(*val));
  712. }
  713. return ret;
  714. }
  715. status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
  716. if (len > INT32_MAX) {
  717. // don't accept size_t values which may have come from an
  718. // inadvertent conversion from a negative int.
  719. return BAD_VALUE;
  720. }
  721. if (!val) {
  722. return writeInt32(-1);
  723. }
  724. status_t ret = writeInt32(static_cast<uint32_t>(len));
  725. if (ret == NO_ERROR) {
  726. ret = write(val, len * sizeof(*val));
  727. }
  728. return ret;
  729. }
  730. status_t Parcel::writeInt64(int64_t val)
  731. {
  732. return writeAligned(val);
  733. }
  734. status_t Parcel::writeUint64(uint64_t val)
  735. {
  736. return writeAligned(val);
  737. }
  738. status_t Parcel::writePointer(uintptr_t val)
  739. {
  740. return writeAligned<binder_uintptr_t>(val);
  741. }
  742. status_t Parcel::writeFloat(float val)
  743. {
  744. return writeAligned(val);
  745. }
#if defined(__mips__) && defined(__mips_hard_float)

// On hard-float MIPS the double is funneled through an integer union so
// the value is written from an integer register path rather than an FPU
// register (presumably an ABI/toolchain workaround — confirm before
// removing). The wire format is the same 8 bytes either way.
status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

// Append an 8-byte-aligned double.
status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif
  762. status_t Parcel::writeCString(const char* str)
  763. {
  764. return write(str, strlen(str)+1);
  765. }
  766. status_t Parcel::writeString8(const String8& str)
  767. {
  768. status_t err = writeInt32(str.bytes());
  769. // only write string if its length is more than zero characters,
  770. // as readString8 will only read if the length field is non-zero.
  771. // this is slightly different from how writeString16 works.
  772. if (str.bytes() > 0 && err == NO_ERROR) {
  773. err = write(str.string(), str.bytes()+1);
  774. }
  775. return err;
  776. }
  777. status_t Parcel::writeString16(const String16& str)
  778. {
  779. return writeString16(str.string(), str.size());
  780. }
  781. status_t Parcel::writeString16(const char16_t* str, size_t len)
  782. {
  783. if (str == NULL) return writeInt32(-1);
  784. status_t err = writeInt32(len);
  785. if (err == NO_ERROR) {
  786. len *= sizeof(char16_t);
  787. uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
  788. if (data) {
  789. memcpy(data, str, len);
  790. *reinterpret_cast<char16_t*>(data+len) = 0;
  791. return NO_ERROR;
  792. }
  793. err = mError;
  794. }
  795. return err;
  796. }
  797. status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
  798. {
  799. return flatten_binder(ProcessState::self(), val, this);
  800. }
  801. status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
  802. {
  803. return flatten_binder(ProcessState::self(), val, this);
  804. }
  805. status_t Parcel::writeNativeHandle(const native_handle* handle)
  806. {
  807. if (!handle || handle->version != sizeof(native_handle))
  808. return BAD_TYPE;
  809. status_t err;
  810. err = writeInt32(handle->numFds);
  811. if (err != NO_ERROR) return err;
  812. err = writeInt32(handle->numInts);
  813. if (err != NO_ERROR) return err;
  814. for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
  815. err = writeDupFileDescriptor(handle->data[i]);
  816. if (err != NO_ERROR) {
  817. ALOGD("write native handle, write dup fd failed");
  818. return err;
  819. }
  820. err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
  821. return err;
  822. }
  823. status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
  824. {
  825. flat_binder_object obj;
  826. obj.type = BINDER_TYPE_FD;
  827. obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
  828. obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
  829. obj.handle = fd;
  830. obj.cookie = takeOwnership ? 1 : 0;
  831. return writeObject(obj, true);
  832. }
  833. status_t Parcel::writeDupFileDescriptor(int fd)
  834. {
  835. int dupFd = dup(fd);
  836. if (dupFd < 0) {
  837. return -errno;
  838. }
  839. status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
  840. if (err) {
  841. close(dupFd);
  842. }
  843. return err;
  844. }
// Reserve space for a blob of `len` bytes and hand the caller a writable
// window onto it via outBlob. Small blobs (or when fds are disallowed)
// are stored inline in the parcel; larger ones go through an ashmem
// region whose fd is written into the parcel instead.
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        // Inline path: tag + raw bytes directly in the parcel buffer.
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;
        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;
        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    // Ashmem path: create a region, map it, optionally drop write
    // permission for the receiver, then send the fd.
    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            if (!mutableCopy) {
                // Lock the region read-only for the receiving side; this
                // mapping stays writable so the caller can fill the blob.
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        // Success: outBlob now owns the mapping and fd.
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
            // Error: fall through to unmap, then close below.
            ::munmap(ptr, len);
        }
    }
    ::close(fd);
    return status;
}
  894. status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
  895. {
  896. // Must match up with what's done in writeBlob.
  897. if (!mAllowFds) return FDS_NOT_ALLOWED;
  898. status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
  899. if (status) return status;
  900. return writeDupFileDescriptor(fd);
  901. }
  902. status_t Parcel::write(const FlattenableHelperInterface& val)
  903. {
  904. status_t err;
  905. // size if needed
  906. const size_t len = val.getFlattenedSize();
  907. const size_t fd_count = val.getFdCount();
  908. if ((len > INT32_MAX) || (fd_count > INT32_MAX)) {
  909. // don't accept size_t values which may have come from an
  910. // inadvertent conversion from a negative int.
  911. return BAD_VALUE;
  912. }
  913. err = this->writeInt32(len);
  914. if (err) return err;
  915. err = this->writeInt32(fd_count);
  916. if (err) return err;
  917. // payload
  918. void* const buf = this->writeInplace(len);
  919. if (buf == NULL)
  920. return BAD_VALUE;
  921. int* fds = NULL;
  922. if (fd_count) {
  923. fds = new int[fd_count];
  924. }
  925. err = val.flatten(buf, len, fds, fd_count);
  926. for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
  927. err = this->writeDupFileDescriptor( fds[i] );
  928. }
  929. if (fd_count) {
  930. delete [] fds;
  931. }
  932. return err;
  933. }
// Write a flat_binder_object into the data buffer and, when it carries
// meta-data, record its offset in mObjects so the kernel driver can
// locate and translate it.
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // remember if it's a file descriptor
        if (val.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        // Need to write meta-data? (null objects are not indexed, matching
        // the skip in readObject's nullMetaData handling.)
        if (nullMetaData || val.binder != 0) {
            mObjects[mObjectsSize] = mDataPos;
#ifndef DISABLE_ASHMEM_TRACKING
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
#else
            acquire_object(ProcessState::self(), val, this);
#endif
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    // Slow path: grow whichever buffer is short, then jump back into the
    // fast path above (the goto deliberately re-enters the if-block).
    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the offsets array by ~1.5x, guarding against overflow.
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY;   // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }
    goto restart_write;
}
  975. status_t Parcel::writeNoException()
  976. {
  977. return writeInt32(0);
  978. }
// Intentionally unimplemented: aborts at runtime if ever called.
void Parcel::remove(size_t /*start*/, size_t /*amt*/)
{
    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}
// Reject plain-data reads that would overlap a flattened object in
// [mDataPos, upperBound). Lazily sorts mObjects the first time it is
// needed so the single-pass hint scan below is valid. Although the
// method is const, it updates mNextObjectHint / mObjectsSorted and sorts
// mObjects in place (these must be mutable members — grounded by the
// writes below).
status_t Parcel::validateReadData(size_t upperBound) const
{
    // Don't allow non-object reads on object data
    if (mObjectsSorted || mObjectsSize <= 1) {
data_sorted:
        // Expect to check only against the next object
        if (mNextObjectHint < mObjectsSize && upperBound > mObjects[mNextObjectHint]) {
            // For some reason the current read position is greater than the next object
            // hint. Iterate until we find the right object
            size_t nextObject = mNextObjectHint;
            do {
                if (mDataPos < mObjects[nextObject] + sizeof(flat_binder_object)) {
                    // Requested info overlaps with an object
                    ALOGE("Attempt to read from protected data in Parcel %p", this);
                    return PERMISSION_DENIED;
                }
                nextObject++;
            } while (nextObject < mObjectsSize && upperBound > mObjects[nextObject]);
            mNextObjectHint = nextObject;
        }
        return NO_ERROR;
    }

    // Quickly determine if mObjects is sorted.
    binder_size_t* currObj = mObjects + mObjectsSize - 1;
    binder_size_t* prevObj = currObj;
    while (currObj > mObjects) {
        prevObj--;
        if(*prevObj > *currObj) {
            goto data_unsorted;
        }
        currObj--;
    }
    mObjectsSorted = true;
    goto data_sorted;

data_unsorted:
    // Insertion Sort mObjects
    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
    // switch to std::sort(mObjects, mObjects + mObjectsSize);
    for (binder_size_t* iter0 = mObjects + 1; iter0 < mObjects + mObjectsSize; iter0++) {
        binder_size_t temp = *iter0;
        binder_size_t* iter1 = iter0 - 1;
        while (iter1 >= mObjects && *iter1 > temp) {
            *(iter1 + 1) = *iter1;
            iter1--;
        }
        *(iter1 + 1) = temp;
    }
    // Offsets moved, so the old hint is no longer meaningful.
    mNextObjectHint = 0;
    mObjectsSorted = true;
    goto data_sorted;
}
  1034. status_t Parcel::read(void* outData, size_t len) const
  1035. {
  1036. if (len > INT32_MAX) {
  1037. // don't accept size_t values which may have come from an
  1038. // inadvertent conversion from a negative int.
  1039. return BAD_VALUE;
  1040. }
  1041. if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
  1042. && len <= pad_size(len)) {
  1043. if (mObjectsSize > 0) {
  1044. status_t err = validateReadData(mDataPos + pad_size(len));
  1045. if(err != NO_ERROR) {
  1046. // Still increment the data position by the expected length
  1047. mDataPos += pad_size(len);
  1048. ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
  1049. return err;
  1050. }
  1051. }
  1052. memcpy(outData, mData+mDataPos, len);
  1053. mDataPos += pad_size(len);
  1054. ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
  1055. return NO_ERROR;
  1056. }
  1057. return NOT_ENOUGH_DATA;
  1058. }
  1059. const void* Parcel::readInplace(size_t len) const
  1060. {
  1061. if (len > INT32_MAX) {
  1062. // don't accept size_t values which may have come from an
  1063. // inadvertent conversion from a negative int.
  1064. return NULL;
  1065. }
  1066. if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
  1067. && len <= pad_size(len)) {
  1068. if (mObjectsSize > 0) {
  1069. status_t err = validateReadData(mDataPos + pad_size(len));
  1070. if(err != NO_ERROR) {
  1071. // Still increment the data position by the expected length
  1072. mDataPos += pad_size(len);
  1073. ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
  1074. return NULL;
  1075. }
  1076. }
  1077. const void* data = mData+mDataPos;
  1078. mDataPos += pad_size(len);
  1079. ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
  1080. return data;
  1081. }
  1082. return NULL;
  1083. }
  1084. template<class T>
  1085. status_t Parcel::readAligned(T *pArg) const {
  1086. COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
  1087. if ((mDataPos+sizeof(T)) <= mDataSize) {
  1088. if (mObjectsSize > 0) {
  1089. status_t err = validateReadData(mDataPos + sizeof(T));
  1090. if(err != NO_ERROR) {
  1091. // Still increment the data position by the expected length
  1092. mDataPos += sizeof(T);
  1093. return err;
  1094. }
  1095. }
  1096. const void* data = mData+mDataPos;
  1097. mDataPos += sizeof(T);
  1098. *pArg = *reinterpret_cast<const T*>(data);
  1099. return NO_ERROR;
  1100. } else {
  1101. return NOT_ENOUGH_DATA;
  1102. }
  1103. }
  1104. template<class T>
  1105. T Parcel::readAligned() const {
  1106. T result;
  1107. if (readAligned(&result) != NO_ERROR) {
  1108. result = 0;
  1109. }
  1110. return result;
  1111. }
  1112. template<class T>
  1113. status_t Parcel::writeAligned(T val) {
  1114. COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
  1115. if ((mDataPos+sizeof(val)) <= mDataCapacity) {
  1116. restart_write:
  1117. *reinterpret_cast<T*>(mData+mDataPos) = val;
  1118. return finishWrite(sizeof(val));
  1119. }
  1120. status_t err = growData(sizeof(val));
  1121. if (err == NO_ERROR) goto restart_write;
  1122. return err;
  1123. }
  1124. status_t Parcel::readInt32(int32_t *pArg) const
  1125. {
  1126. return readAligned(pArg);
  1127. }
// Value-returning variant; yields 0 when the parcel has no more data
// (see readAligned<T>()).
int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}
  1132. status_t Parcel::readUint32(uint32_t *pArg) const
  1133. {
  1134. return readAligned(pArg);
  1135. }
// Value-returning variant; yields 0 when the parcel has no more data
// (see readAligned<T>()).
uint32_t Parcel::readUint32() const
{
    return readAligned<uint32_t>();
}
  1140. status_t Parcel::readInt64(int64_t *pArg) const
  1141. {
  1142. return readAligned(pArg);
  1143. }
// Value-returning variant; yields 0 when the parcel has no more data
// (see readAligned<T>()).
int64_t Parcel::readInt64() const
{
    return readAligned<int64_t>();
}
  1148. status_t Parcel::readUint64(uint64_t *pArg) const
  1149. {
  1150. return readAligned(pArg);
  1151. }
// Value-returning variant; yields 0 when the parcel has no more data
// (see readAligned<T>()).
uint64_t Parcel::readUint64() const
{
    return readAligned<uint64_t>();
}
  1156. status_t Parcel::readPointer(uintptr_t *pArg) const
  1157. {
  1158. status_t ret;
  1159. binder_uintptr_t ptr;
  1160. ret = readAligned(&ptr);
  1161. if (!ret)
  1162. *pArg = ptr;
  1163. return ret;
  1164. }
  1165. uintptr_t Parcel::readPointer() const
  1166. {
  1167. return readAligned<binder_uintptr_t>();
  1168. }
  1169. status_t Parcel::readFloat(float *pArg) const
  1170. {
  1171. return readAligned(pArg);
  1172. }
// Value-returning variant; yields 0 when the parcel has no more data
// (see readAligned<T>()).
float Parcel::readFloat() const
{
    return readAligned<float>();
}
#if defined(__mips__) && defined(__mips_hard_float)

// On hard-float MIPS the 8 bytes are read through an integer union so
// the value moves through the integer register path, mirroring
// writeDouble above (presumably an ABI/toolchain workaround — confirm
// before removing).
status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}

// Value-returning variant; the underlying readAligned yields 0 on
// failure, which reinterprets as 0.0.
double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}

#else

// Read an 8-byte-aligned double into *pArg.
status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}

// Value-returning variant; yields 0 when the parcel has no more data.
double Parcel::readDouble() const
{
    return readAligned<double>();
}

#endif
  1209. status_t Parcel::readIntPtr(intptr_t *pArg) const
  1210. {
  1211. return readAligned(pArg);
  1212. }
// Value-returning variant; yields 0 when the parcel has no more data
// (see readAligned<T>()).
intptr_t Parcel::readIntPtr() const
{
    return readAligned<intptr_t>();
}
  1217. const char* Parcel::readCString() const
  1218. {
  1219. const size_t avail = mDataSize-mDataPos;
  1220. if (avail > 0) {
  1221. const char* str = reinterpret_cast<const char*>(mData+mDataPos);
  1222. // is the string's trailing NUL within the parcel's valid bounds?
  1223. const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
  1224. if (eos) {
  1225. const size_t len = eos - str;
  1226. mDataPos += pad_size(len+1);
  1227. ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
  1228. return str;
  1229. }
  1230. }
  1231. return NULL;
  1232. }
  1233. String8 Parcel::readString8() const
  1234. {
  1235. int32_t size = readInt32();
  1236. // watch for potential int overflow adding 1 for trailing NUL
  1237. if (size > 0 && size < INT32_MAX) {
  1238. const char* str = (const char*)readInplace(size+1);
  1239. if (str) return String8(str, size);
  1240. }
  1241. return String8();
  1242. }
  1243. String16 Parcel::readString16() const
  1244. {
  1245. size_t len;
  1246. const char16_t* str = readString16Inplace(&len);
  1247. if (str) return String16(str, len);
  1248. ALOGE("Reading a NULL string not supported here.");
  1249. return String16();
  1250. }
  1251. const char16_t* Parcel::readString16Inplace(size_t* outLen) const
  1252. {
  1253. int32_t size = readInt32();
  1254. // watch for potential int overflow from size+1
  1255. if (size >= 0 && size < INT32_MAX) {
  1256. *outLen = size;
  1257. const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
  1258. if (str != NULL) {
  1259. return str;
  1260. }
  1261. }
  1262. *outLen = 0;
  1263. return NULL;
  1264. }
  1265. sp<IBinder> Parcel::readStrongBinder() const
  1266. {
  1267. sp<IBinder> val;
  1268. unflatten_binder(ProcessState::self(), *this, &val);
  1269. return val;
  1270. }
  1271. wp<IBinder> Parcel::readWeakBinder() const
  1272. {
  1273. wp<IBinder> val;
  1274. unflatten_binder(ProcessState::self(), *this, &val);
  1275. return val;
  1276. }
  1277. int32_t Parcel::readExceptionCode() const
  1278. {
  1279. int32_t exception_code = readAligned<int32_t>();
  1280. if (exception_code == EX_HAS_REPLY_HEADER) {
  1281. int32_t header_start = dataPosition();
  1282. int32_t header_size = readAligned<int32_t>();
  1283. // Skip over fat responses headers. Not used (or propagated) in
  1284. // native code
  1285. setDataPosition(header_start + header_size);
  1286. // And fat response headers are currently only used when there are no
  1287. // exceptions, so return no error:
  1288. return 0;
  1289. }
  1290. return exception_code;
  1291. }
// Reconstruct a native_handle written by writeNativeHandle. Every fd in
// the parcel is dup'd, so the returned handle owns its descriptors; the
// caller must eventually native_handle_close() + native_handle_delete().
// Returns NULL (0) on any failure, with all partially-dup'd fds closed.
// NOTE(review): numFds/numInts come straight from the parcel; validation
// is assumed to happen inside native_handle_create — confirm.
native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return 0;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return 0;

    native_handle* h = native_handle_create(numFds, numInts);
    if (!h) {
        return 0;
    }

    // dup() each fd so the handle's descriptors are independent of the
    // parcel's. A negative readFileDescriptor() makes dup() return -1,
    // which is caught by the < 0 check below.
    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        h->data[i] = dup(readFileDescriptor());
        if (h->data[i] < 0) {
            // Undo the dups made so far before giving up.
            for (int j = 0; j < i; j++) {
                close(h->data[j]);
            }
            native_handle_delete(h);
            return 0;
        }
    }

    // The plain integers follow the fds in the handle's data array.
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        native_handle_close(h);
        native_handle_delete(h);
        h = 0;
    }
    return h;
}
  1322. int Parcel::readFileDescriptor() const
  1323. {
  1324. const flat_binder_object* flat = readObject(true);
  1325. if (flat) {
  1326. switch (flat->type) {
  1327. case BINDER_TYPE_FD:
  1328. //ALOGI("Returning file descriptor %ld from parcel %p", flat->handle, this);
  1329. return flat->handle;
  1330. }
  1331. }
  1332. return BAD_TYPE;
  1333. }
// Read a blob written by writeBlob. `len` must be supplied by the
// caller (the wire format carries only the storage-type tag). Inline
// blobs point into the parcel's own buffer; ashmem blobs are mmap'd
// from the transported fd, read-only unless the tag says mutable.
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
{
    int32_t blobType;
    status_t status = readInt32(&blobType);
    if (status) return status;

    if (blobType == BLOB_INPLACE) {
        ALOGV("readBlob: read in place");
        const void* ptr = readInplace(len);
        if (!ptr) return BAD_VALUE;
        // fd of -1 marks "no ashmem backing" for the blob.
        outBlob->init(-1, const_cast<void*>(ptr), len, false);
        return NO_ERROR;
    }

    // Ashmem path: map the region behind the transported fd. The fd is
    // still owned by the parcel (readFileDescriptor does not dup).
    ALOGV("readBlob: read from ashmem");
    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
            MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) return NO_MEMORY;

    outBlob->init(fd, ptr, len, isMutable);
    return NO_ERROR;
}
  1356. status_t Parcel::read(FlattenableHelperInterface& val) const
  1357. {
  1358. // size
  1359. const size_t len = this->readInt32();
  1360. const size_t fd_count = this->readInt32();
  1361. if (len > INT32_MAX) {
  1362. // don't accept size_t values which may have come from an
  1363. // inadvertent conversion from a negative int.
  1364. return BAD_VALUE;
  1365. }
  1366. // payload
  1367. void const* const buf = this->readInplace(pad_size(len));
  1368. if (buf == NULL)
  1369. return BAD_VALUE;
  1370. int* fds = NULL;
  1371. if (fd_count) {
  1372. fds = new int[fd_count];
  1373. }
  1374. status_t err = NO_ERROR;
  1375. for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
  1376. fds[i] = dup(this->readFileDescriptor());
  1377. if (fds[i] < 0) {
  1378. err = BAD_VALUE;
  1379. ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
  1380. i, fds[i], fd_count, strerror(errno));
  1381. }
  1382. }
  1383. if (err == NO_ERROR) {
  1384. err = val.unflatten(buf, len, fds, fd_count);
  1385. }
  1386. if (fd_count) {
  1387. delete [] fds;
  1388. }
  1389. return err;
  1390. }
// Read the next flat_binder_object. Unless the object is a NULL binder
// (and nullMetaData is false), its offset must appear in mObjects —
// i.e. it must have been indexed by writeObject — otherwise NULL is
// returned. mNextObjectHint caches the expected index for sequential
// reads; the search falls back to scanning forward then backward.
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
{
    const size_t DPOS = mDataPos;
    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
        const flat_binder_object* obj
                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
        mDataPos = DPOS + sizeof(flat_binder_object);
        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
            // When transferring a NULL object, we don't write it into
            // the object list, so we don't want to check for it when
            // reading.
            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
            return obj;
        }

        // Ensure that this object is valid...
        binder_size_t* const OBJS = mObjects;
        const size_t N = mObjectsSize;
        size_t opos = mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                 this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                // Hint ran past the end; clamp to the last entry.
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                     this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
             this, DPOS);
    }
    // Either not enough data for an object, or the offset was not indexed.
    return NULL;
}
  1447. void Parcel::closeFileDescriptors()
  1448. {
  1449. size_t i = mObjectsSize;
  1450. if (i > 0) {
  1451. //ALOGI("Closing file descriptors for %zu objects...", i);
  1452. }
  1453. while (i > 0) {
  1454. i--;
  1455. const flat_binder_object* flat
  1456. = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
  1457. if (flat->type == BINDER_TYPE_FD) {
  1458. //ALOGI("Closing fd: %ld", flat->handle);
  1459. close(flat->handle);
  1460. }
  1461. }
  1462. }
  1463. uintptr_t Parcel::ipcData() const
  1464. {
  1465. return reinterpret_cast<uintptr_t>(mData);
  1466. }
  1467. size_t Parcel::ipcDataSize() const
  1468. {
  1469. return (mDataSize > mDataPos ? mDataSize : mDataPos);
  1470. }
  1471. uintptr_t Parcel::ipcObjects() const
  1472. {
  1473. return reinterpret_cast<uintptr_t>(mObjects);
  1474. }
  1475. size_t Parcel::ipcObjectsCount() const
  1476. {
  1477. return mObjectsSize;
  1478. }
// Point this Parcel at an externally owned buffer (typically one handed
// back by the binder driver) instead of heap data it allocated itself.
// relFunc/relCookie are invoked later to release the buffer. Object
// offsets are sanity-checked to be strictly increasing and non-
// overlapping; on the first bad offset the object list is discarded
// (mObjectsSize = 0) while the raw data is kept.
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    // Release any buffer we currently hold before adopting the new one.
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
    mDataPos = 0;
    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mObjectsSorted = false;
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize; i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            // Offsets must not go backwards or overlap a previous object.
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    // Re-derive mHasFds/mFdsKnown from the adopted objects.
    scanForFds();
}
  1508. void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
  1509. {
  1510. to << "Parcel(";
  1511. if (errorCheck() != NO_ERROR) {
  1512. const status_t err = errorCheck();
  1513. to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
  1514. } else if (dataSize() > 0) {
  1515. const uint8_t* DATA = data();
  1516. to << indent << HexDump(DATA, dataSize()) << dedent;
  1517. const binder_size_t* OBJS = objects();
  1518. const size_t N = objectsCount();
  1519. for (size_t i=0; i<N; i++) {
  1520. const flat_binder_object* flat
  1521. = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
  1522. to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
  1523. << TypeCode(flat->type & 0x7f7f7f00)
  1524. << " = " << flat->binder;
  1525. }
  1526. } else {
  1527. to << "NULL";
  1528. }
  1529. to << ")";
  1530. }
  1531. void Parcel::releaseObjects()
  1532. {
  1533. const sp<ProcessState> proc(ProcessState::self());
  1534. size_t i = mObjectsSize;
  1535. uint8_t* const data = mData;
  1536. binder_size_t* const objects = mObjects;
  1537. while (i > 0) {
  1538. i--;
  1539. const flat_binder_object* flat
  1540. = reinterpret_cast<flat_binder_object*>(data+objects[i]);
  1541. #ifndef DISABLE_ASHMEM_TRACKING
  1542. release_object(proc, *flat, this, &mOpenAshmemSize);
  1543. #else
  1544. release_object(proc, *flat, this);
  1545. #endif
  1546. }
  1547. }
  1548. void Parcel::acquireObjects()
  1549. {
  1550. const sp<ProcessState> proc(ProcessState::self());
  1551. size_t i = mObjectsSize;
  1552. uint8_t* const data = mData;
  1553. binder_size_t* const objects = mObjects;
  1554. while (i > 0) {
  1555. i--;
  1556. const flat_binder_object* flat
  1557. = reinterpret_cast<flat_binder_object*>(data+objects[i]);
  1558. #ifndef DISABLE_ASHMEM_TRACKING
  1559. acquire_object(proc, *flat, this, &mOpenAshmemSize);
  1560. #else
  1561. acquire_object(proc, *flat, this);
  1562. #endif
  1563. }
  1564. }
// Release all buffers and object references, then reset every member to
// its pristine state so the Parcel can be reused.
void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}
// Release the data buffer and object array without resetting members
// (callers that reuse the Parcel follow up with initState). Two cases:
// externally owned data is handed back to its mOwner release callback;
// self-allocated data has its object refs dropped, the global alloc
// accounting decremented, and the buffers freed.
void Parcel::freeDataNoInit()
{
    if (mOwner) {
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        // The owner callback is responsible for both data and objects.
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
    } else {
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            // Keep the process-wide Parcel allocation statistics in sync.
            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
            gParcelGlobalAllocSize -= mDataCapacity;
            gParcelGlobalAllocCount--;
            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
            free(mData);
        }
        if (mObjects) free(mObjects);
    }
}
  1590. status_t Parcel::growData(size_t len)
  1591. {
  1592. if (len > INT32_MAX) {
  1593. // don't accept size_t values which may have come from an
  1594. // inadvertent conversion from a negative int.
  1595. return BAD_VALUE;
  1596. }
  1597. size_t newSize = ((mDataSize+len)*3)/2;
  1598. return (newSize <= mDataSize)
  1599. ? (status_t) NO_MEMORY
  1600. : continueWrite(newSize);
  1601. }
// Discard all contents and resize the data buffer to `desired`, leaving
// the Parcel empty and ready for writing. Externally owned data is first
// released and replaced via continueWrite; self-owned data is realloc'd
// in place.
status_t Parcel::restartWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        // Can't realloc a buffer we don't own; drop it and allocate fresh.
        freeData();
        return continueWrite(desired);
    }

    uint8_t* data = (uint8_t*)realloc(mData, desired);
    if (!data && desired > mDataCapacity) {
        // Shrinking realloc failures are tolerated (old buffer still
        // valid); only a failed grow is a hard error.
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    // Contents are being discarded, so drop refs on all objects now.
    releaseObjects();

    if (data) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        // Keep the process-wide Parcel allocation statistics in sync.
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocSize -= mDataCapacity;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    // Reset all object bookkeeping and fd state to a clean parcel.
    free(mObjects);
    mObjects = NULL;
    mObjectsSize = mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mObjectsSorted = false;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;

    return NO_ERROR;
}
  1641. status_t Parcel::continueWrite(size_t desired)
  1642. {
  1643. if (desired > INT32_MAX) {
  1644. // don't accept size_t values which may have come from an
  1645. // inadvertent conversion from a negative int.
  1646. return BAD_VALUE;
  1647. }
  1648. // If shrinking, first adjust for any objects that appear
  1649. // after the new data size.
  1650. size_t objectsSize = mObjectsSize;
  1651. if (desired < mDataSize) {
  1652. if (desired == 0) {
  1653. objectsSize = 0;
  1654. } else {
  1655. while (objectsSize > 0) {
  1656. if (mObjects[objectsSize-1] < desired)
  1657. break;
  1658. objectsSize--;
  1659. }
  1660. }
  1661. }
  1662. if (mOwner) {
  1663. // If the size is going to zero, just release the owner's data.
  1664. if (desired == 0) {
  1665. freeData();
  1666. return NO_ERROR;
  1667. }
  1668. // If there is a different owner, we need to take
  1669. // posession.
  1670. uint8_t* data = (uint8_t*)malloc(desired);
  1671. if (!data) {
  1672. mError = NO_MEMORY;
  1673. return NO_MEMORY;
  1674. }
  1675. binder_size_t* objects = NULL;
  1676. if (objectsSize) {
  1677. objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
  1678. if (!objects) {
  1679. free(data);
  1680. mError = NO_MEMORY;
  1681. return NO_MEMORY;
  1682. }
  1683. // Little hack to only acquire references on objects
  1684. // we will be keeping.
  1685. size_t oldObjectsSize = mObjectsSize;
  1686. mObjectsSize = objectsSize;
  1687. acquireObjects();
  1688. mObjectsSize = oldObjectsSize;
  1689. }
  1690. if (mData) {
  1691. memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
  1692. }
  1693. if (objects && mObjects) {
  1694. memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
  1695. }
  1696. //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
  1697. mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
  1698. mOwner = NULL;
  1699. LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
  1700. pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
  1701. gParcelGlobalAllocSize += desired;
  1702. gParcelGlobalAllocCount++;
  1703. pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
  1704. mData = data;
  1705. mObjects = objects;
  1706. mDataSize = (mDataSize < desired) ? mDataSize : desired;
  1707. ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
  1708. mDataCapacity = desired;
  1709. mObjectsSize = mObjectsCapacity = objectsSize;
  1710. mNextObjectHint = 0;
  1711. mObjectsSorted = false;
  1712. } else if (mData) {
  1713. if (objectsSize < mObjectsSize) {
  1714. // Need to release refs on any objects we are dropping.
  1715. const sp<ProcessState> proc(ProcessState::self());
  1716. for (size_t i=objectsSize; i<mObjectsSize; i++) {
  1717. const flat_binder_object* flat
  1718. = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
  1719. if (flat->type == BINDER_TYPE_FD) {
  1720. // will need to rescan because we may have lopped off the only FDs
  1721. mFdsKnown = false;
  1722. }
  1723. #ifndef DISABLE_ASHMEM_TRACKING
  1724. release_object(proc, *flat, this, &mOpenAshmemSize);
  1725. #else
  1726. release_object(proc, *flat, this);
  1727. #endif
  1728. }
  1729. binder_size_t* objects =
  1730. (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
  1731. if (objects) {
  1732. mObjects = objects;
  1733. }
  1734. mObjectsSize = objectsSize;
  1735. mNextObjectHint = 0;
  1736. mObjectsSorted = false;
  1737. }
  1738. // We own the data, so we can just do a realloc().
  1739. if (desired > mDataCapacity) {
  1740. uint8_t* data = (uint8_t*)realloc(mData, desired);
  1741. if (data) {
  1742. LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
  1743. desired);
  1744. pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
  1745. gParcelGlobalAllocSize += desired;
  1746. gParcelGlobalAllocSize -= mDataCapacity;
  1747. pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
  1748. mData = data;
  1749. mDataCapacity = desired;
  1750. } else if (desired > mDataCapacity) {
  1751. mError = NO_MEMORY;
  1752. return NO_MEMORY;
  1753. }
  1754. } else {
  1755. if (mDataSize > desired) {
  1756. mDataSize = desired;
  1757. ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
  1758. }
  1759. if (mDataPos > desired) {
  1760. mDataPos = desired;
  1761. ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
  1762. }
  1763. }
  1764. } else {
  1765. // This is the first data. Easy!
  1766. uint8_t* data = (uint8_t*)malloc(desired);
  1767. if (!data) {
  1768. mError = NO_MEMORY;
  1769. return NO_MEMORY;
  1770. }
  1771. if(!(mDataCapacity == 0 && mObjects == NULL
  1772. && mObjectsCapacity == 0)) {
  1773. ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
  1774. }
  1775. LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
  1776. pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
  1777. gParcelGlobalAllocSize += desired;
  1778. gParcelGlobalAllocCount++;
  1779. pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
  1780. mData = data;
  1781. mDataSize = mDataPos = 0;
  1782. ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
  1783. ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
  1784. mDataCapacity = desired;
  1785. }
  1786. return NO_ERROR;
  1787. }
  1788. void Parcel::initState()
  1789. {
  1790. LOG_ALLOC("Parcel %p: initState", this);
  1791. mError = NO_ERROR;
  1792. mData = 0;
  1793. mDataSize = 0;
  1794. mDataCapacity = 0;
  1795. mDataPos = 0;
  1796. ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
  1797. ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
  1798. mObjects = NULL;
  1799. mObjectsSize = 0;
  1800. mObjectsCapacity = 0;
  1801. mNextObjectHint = 0;
  1802. mObjectsSorted = false;
  1803. mHasFds = false;
  1804. mFdsKnown = true;
  1805. mAllowFds = true;
  1806. mOwner = NULL;
  1807. #ifndef DISABLE_ASHMEM_TRACKING
  1808. mOpenAshmemSize = 0;
  1809. #endif
  1810. }
  1811. void Parcel::scanForFds() const
  1812. {
  1813. bool hasFds = false;
  1814. for (size_t i=0; i<mObjectsSize; i++) {
  1815. const flat_binder_object* flat
  1816. = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
  1817. if (flat->type == BINDER_TYPE_FD) {
  1818. hasFds = true;
  1819. break;
  1820. }
  1821. }
  1822. mHasFds = hasFds;
  1823. mFdsKnown = true;
  1824. }
// Legacy accessor kept for ABI stability; see getOpenAshmemSize() for the
// current equivalent.
size_t Parcel::getBlobAshmemSize() const
{
    // This used to return the size of all blobs that were written to ashmem, now we're returning
    // the ashmem currently referenced by this Parcel, which should be equivalent.
    // TODO: Remove method once ABI can be changed.
#ifndef DISABLE_ASHMEM_TRACKING
    return mOpenAshmemSize;
#else
    // Tracking compiled out: report zero rather than a stale value.
    return 0;
#endif
}
// Returns the tracked amount of ashmem currently referenced by this Parcel
// (mOpenAshmemSize), or 0 when tracking is compiled out.
size_t Parcel::getOpenAshmemSize() const
{
#ifndef DISABLE_ASHMEM_TRACKING
    return mOpenAshmemSize;
#else
    return 0;
#endif
}
  1844. // --- Parcel::Blob ---
  1845. Parcel::Blob::Blob() :
  1846. mFd(-1), mData(NULL), mSize(0), mMutable(false) {
  1847. }
// Releases any mmap()ed region held by this blob (see release()).
Parcel::Blob::~Blob() {
    release();
}
  1851. void Parcel::Blob::release() {
  1852. if (mFd != -1 && mData) {
  1853. ::munmap(mData, mSize);
  1854. }
  1855. clear();
  1856. }
  1857. void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
  1858. mFd = fd;
  1859. mData = data;
  1860. mSize = size;
  1861. mMutable = isMutable;
  1862. }
  1863. void Parcel::Blob::clear() {
  1864. mFd = -1;
  1865. mData = NULL;
  1866. mSize = 0;
  1867. mMutable = false;
  1868. }
  1869. }; // namespace android