/*
 * handle-io.c: Module to give Windows front ends the general
 * ability to deal with consoles, pipes, serial ports, or any other
 * type of data stream accessed through a Windows API HANDLE rather
 * than a WinSock SOCKET.
 *
 * We do this by spawning a subthread to continuously try to read
 * from the handle. Every time a read successfully returns some
 * data, the subthread sets an event object which is picked up by
 * the main thread, and the main thread then sets an event in
 * return to instruct the subthread to resume reading.
 *
 * Output works precisely the other way round, in a second
 * subthread. The output subthread should not be attempting to
 * write all the time, because it hasn't always got data _to_
 * write; so the output thread waits for an event object notifying
 * it to _attempt_ a write, and then it sets an event in return
 * when one completes.
 *
 * (It's terribly annoying having to spawn a subthread for each
 * direction of each handle. Technically it isn't necessary for
 * serial ports, since we could use overlapped I/O within the main
 * thread and wait directly on the event objects in the OVERLAPPED
 * structures. However, we can't use this trick for some types of
 * file handle at all - for some reason Windows restricts use of
 * OVERLAPPED to files which were opened with the overlapped flag -
 * and so we must use threads for those. This being the case, it's
 * simplest just to use threads for everything rather than trying
 * to keep track of multiple completely separate mechanisms.)
 */
  31. #include <assert.h>
  32. #include "putty.h"
/* ----------------------------------------------------------------------
 * Generic definitions.
 */

/*
 * Node in the doubly-linked, circular 'ready list' of handles that
 * want attention from the main thread. A node linked to itself in
 * both directions is "not on the list".
 */
typedef struct handle_list_node handle_list_node;
struct handle_list_node {
    handle_list_node *next, *prev;
};

/* Called by subthreads to put their handle on the ready list
 * (defined below, after the list head). */
static void add_to_ready_list(handle_list_node *node);

/*
 * Maximum amount of backlog we will allow to build up on an input
 * handle before we stop reading from it.
 */
#define MAX_BACKLOG 32768
struct handle_generic {
    /*
     * Initial fields common to both handle_input and handle_output
     * structures. handle_input and handle_output below repeat these
     * fields verbatim, so a 'struct handle' can be accessed through
     * this generic view regardless of its direction.
     *
     * The HANDLEs are set up at initialisation time and are
     * thereafter read-only to both main thread and subthread.
     * `moribund' is only used by the main thread; `done' is
     * written by the main thread before signalling to the
     * subthread. `defunct' and `busy' are used only by the main
     * thread.
     */
    HANDLE h;                          /* the handle itself */
    handle_list_node ready_node;       /* for linking on to the ready list */
    HANDLE ev_from_main;               /* event used to signal back to us */
    bool moribund;                     /* are we going to kill this soon? */
    bool done;                         /* request subthread to terminate */
    bool defunct;                      /* has the subthread already gone? */
    bool busy;                         /* operation currently in progress? */
    void *privdata;                    /* for client to remember who they are */
};

/* Discriminator for the union inside 'struct handle' below. */
typedef enum { HT_INPUT, HT_OUTPUT } HandleType;
/* ----------------------------------------------------------------------
 * Input threads.
 */

/*
 * Data required by an input thread.
 */
struct handle_input {
    /*
     * Copy of the handle_generic structure. Field order and types
     * must stay in sync with handle_generic above.
     */
    HANDLE h;                          /* the handle itself */
    handle_list_node ready_node;       /* for linking on to the ready list */
    HANDLE ev_from_main;               /* event used to signal back to us */
    bool moribund;                     /* are we going to kill this soon? */
    bool done;                         /* request subthread to terminate */
    bool defunct;                      /* has the subthread already gone? */
    bool busy;                         /* operation currently in progress? */
    void *privdata;                    /* for client to remember who they are */

    /*
     * Data set at initialisation and then read-only.
     */
    int flags;                         /* HANDLE_FLAG_* bit flags */

    /*
     * Data set by the input thread before marking the handle ready,
     * and read by the main thread after receiving that signal.
     */
    char buffer[4096];                 /* the data read from the handle */
    DWORD len;                         /* how much data that was */
    int readerr;                       /* lets us know about read errors */

    /*
     * Callback function called by this module when data arrives on
     * an input handle.
     */
    handle_inputfn_t gotdata;
};
/*
 * The actual thread procedure for an input thread.
 *
 * Loops reading from ctx->h into ctx->buffer, putting the handle on
 * the ready list after each read completes (success, EOF or error),
 * and then blocking on ev_from_main until the main thread permits
 * another read. Exits on EOF/error, or when the main thread sets
 * ctx->done. After the final add_to_ready_list the main thread may
 * free ctx, so ctx must not be touched again on those paths.
 */
static DWORD WINAPI handle_input_threadfunc(void *param)
{
    struct handle_input *ctx = (struct handle_input *) param;
    OVERLAPPED ovl, *povl;
    HANDLE oev;                        /* only valid when povl != NULL */
    bool readret, finished;
    int readlen;

    if (ctx->flags & HANDLE_FLAG_OVERLAPPED) {
        povl = &ovl;
        oev = CreateEvent(NULL, true, false, NULL);
    } else {
        povl = NULL;
    }

    /* Unit-buffer mode reads one byte at a time (needed for e.g.
     * interactive console-like handles). */
    if (ctx->flags & HANDLE_FLAG_UNITBUFFER)
        readlen = 1;
    else
        readlen = sizeof(ctx->buffer);

    while (1) {
        if (povl) {
            memset(povl, 0, sizeof(OVERLAPPED));
            povl->hEvent = oev;
        }
        readret = ReadFile(ctx->h, ctx->buffer, readlen, &ctx->len, povl);
        if (!readret)
            ctx->readerr = GetLastError();
        else
            ctx->readerr = 0;
        if (povl && !readret && ctx->readerr == ERROR_IO_PENDING) {
            /* Overlapped read is in flight: wait for it to finish. */
            WaitForSingleObject(povl->hEvent, INFINITE);
            readret = GetOverlappedResult(ctx->h, povl, &ctx->len, false);
            if (!readret)
                ctx->readerr = GetLastError();
            else
                ctx->readerr = 0;
        }

        if (!readret) {
            /*
             * Windows apparently sends ERROR_BROKEN_PIPE when a
             * pipe we're reading from is closed normally from the
             * writing end. This is ludicrous; if that situation
             * isn't a natural EOF, _nothing_ is. So if we get that
             * particular error, we pretend it's EOF.
             */
            if (ctx->readerr == ERROR_BROKEN_PIPE)
                ctx->readerr = 0;
            ctx->len = 0;
        }

        if (readret && ctx->len == 0 &&
            (ctx->flags & HANDLE_FLAG_IGNOREEOF))
            continue;                  /* silently retry past EOFs */

        /*
         * If we just set ctx->len to 0, that means the read operation
         * has returned end-of-file. Telling that to the main thread
         * will cause it to set its 'defunct' flag and dispose of the
         * handle structure at the next opportunity, in which case we
         * mustn't touch ctx at all after the SetEvent. (Hence we do
         * even _this_ check before the SetEvent.)
         */
        finished = (ctx->len == 0);

        add_to_ready_list(&ctx->ready_node);

        if (finished)
            break;

        WaitForSingleObject(ctx->ev_from_main, INFINITE);
        if (ctx->done) {
            /*
             * The main thread has asked us to shut down. Send back an
             * event indicating that we've done so. Hereafter we must
             * not touch ctx at all, because the main thread might
             * have freed it.
             */
            add_to_ready_list(&ctx->ready_node);
            break;
        }
    }

    if (povl)
        CloseHandle(oev);

    return 0;
}
  184. /*
  185. * This is called after a successful read, or from the
  186. * `unthrottle' function. It decides whether or not to begin a new
  187. * read operation.
  188. */
  189. static void handle_throttle(struct handle_input *ctx, int backlog)
  190. {
  191. if (ctx->defunct)
  192. return;
  193. /*
  194. * If there's a read operation already in progress, do nothing:
  195. * when that completes, we'll come back here and be in a
  196. * position to make a better decision.
  197. */
  198. if (ctx->busy)
  199. return;
  200. /*
  201. * Otherwise, we must decide whether to start a new read based
  202. * on the size of the backlog.
  203. */
  204. if (backlog < MAX_BACKLOG) {
  205. SetEvent(ctx->ev_from_main);
  206. ctx->busy = true;
  207. }
  208. }
/* ----------------------------------------------------------------------
 * Output threads.
 */

/*
 * Data required by an output thread.
 */
struct handle_output {
    /*
     * Copy of the handle_generic structure. Field order and types
     * must stay in sync with handle_generic above.
     */
    HANDLE h;                          /* the handle itself */
    handle_list_node ready_node;       /* for linking on to the ready list */
    HANDLE ev_from_main;               /* event used to signal back to us */
    bool moribund;                     /* are we going to kill this soon? */
    bool done;                         /* request subthread to terminate */
    bool defunct;                      /* has the subthread already gone? */
    bool busy;                         /* operation currently in progress? */
    void *privdata;                    /* for client to remember who they are */

    /*
     * Data set at initialisation and then read-only.
     */
    int flags;                         /* HANDLE_FLAG_* bit flags */

    /*
     * Data set by the main thread before signalling ev_from_main,
     * and read by the output thread after receiving that signal.
     */
    const char *buffer;                /* the data to write */
    DWORD len;                         /* how much data there is */

    /*
     * Data set by the output thread before marking this handle as
     * ready, and read by the main thread after receiving that signal.
     */
    DWORD lenwritten;                  /* how much data we actually wrote */
    int writeerr;                      /* return value from WriteFile */

    /*
     * Data only ever read or written by the main thread.
     */
    bufchain queued_data;              /* data still waiting to be written */
    enum { EOF_NO, EOF_PENDING, EOF_SENT } outgoingeof;

    /*
     * Callback function called when the backlog in the bufchain
     * drops.
     */
    handle_outputfn_t sentdata;
    struct handle *sentdata_param;
};
  255. static DWORD WINAPI handle_output_threadfunc(void *param)
  256. {
  257. struct handle_output *ctx = (struct handle_output *) param;
  258. OVERLAPPED ovl, *povl;
  259. HANDLE oev;
  260. bool writeret;
  261. if (ctx->flags & HANDLE_FLAG_OVERLAPPED) {
  262. povl = &ovl;
  263. oev = CreateEvent(NULL, true, false, NULL);
  264. } else {
  265. povl = NULL;
  266. }
  267. while (1) {
  268. WaitForSingleObject(ctx->ev_from_main, INFINITE);
  269. if (ctx->done) {
  270. /*
  271. * The main thread has asked us to shut down. Send back an
  272. * event indicating that we've done so. Hereafter we must
  273. * not touch ctx at all, because the main thread might
  274. * have freed it.
  275. */
  276. add_to_ready_list(&ctx->ready_node);
  277. break;
  278. }
  279. if (povl) {
  280. memset(povl, 0, sizeof(OVERLAPPED));
  281. povl->hEvent = oev;
  282. }
  283. writeret = WriteFile(ctx->h, ctx->buffer, ctx->len,
  284. &ctx->lenwritten, povl);
  285. if (!writeret)
  286. ctx->writeerr = GetLastError();
  287. else
  288. ctx->writeerr = 0;
  289. if (povl && !writeret && GetLastError() == ERROR_IO_PENDING) {
  290. writeret = GetOverlappedResult(ctx->h, povl,
  291. &ctx->lenwritten, true);
  292. if (!writeret)
  293. ctx->writeerr = GetLastError();
  294. else
  295. ctx->writeerr = 0;
  296. }
  297. add_to_ready_list(&ctx->ready_node);
  298. if (!writeret) {
  299. /*
  300. * The write operation has suffered an error. Telling that
  301. * to the main thread will cause it to set its 'defunct'
  302. * flag and dispose of the handle structure at the next
  303. * opportunity, so we must not touch ctx at all after
  304. * this.
  305. */
  306. break;
  307. }
  308. }
  309. if (povl)
  310. CloseHandle(oev);
  311. return 0;
  312. }
  313. static void handle_try_output(struct handle_output *ctx)
  314. {
  315. if (!ctx->busy && bufchain_size(&ctx->queued_data)) {
  316. ptrlen data = bufchain_prefix(&ctx->queued_data);
  317. ctx->buffer = data.ptr;
  318. ctx->len = min(data.len, ~(DWORD)0);
  319. SetEvent(ctx->ev_from_main);
  320. ctx->busy = true;
  321. } else if (!ctx->busy && bufchain_size(&ctx->queued_data) == 0 &&
  322. ctx->outgoingeof == EOF_PENDING) {
  323. ctx->sentdata(ctx->sentdata_param, 0, 0, true);
  324. ctx->h = INVALID_HANDLE_VALUE;
  325. ctx->outgoingeof = EOF_SENT;
  326. }
  327. }
/* ----------------------------------------------------------------------
 * Unified code handling both input and output threads.
 */

/*
 * The opaque handle structure seen by clients: a type tag plus a
 * union of the two per-direction structures. Both union members
 * begin with the same fields as handle_generic, so 'u.g' can be
 * used to access the common fields whichever direction this is.
 */
struct handle {
    HandleType type;
    union {
        struct handle_generic g;
        struct handle_input i;
        struct handle_output o;
    } u;
};
/*
 * Linked list storing the current list of handles ready to have
 * something done to them by the main thread. ready_head is the
 * static head node of the circular list; ready_critsec protects
 * the list, since subthreads append while the main thread consumes.
 */
static handle_list_node ready_head[1];
static CRITICAL_SECTION ready_critsec[1];

/*
 * Event object used by all subthreads to signal that they've just put
 * something on the ready list, i.e. that the ready list is non-empty.
 * Initialised lazily by ensure_ready_event_setup().
 */
static HANDLE ready_event = INVALID_HANDLE_VALUE;
  350. static void add_to_ready_list(handle_list_node *node)
  351. {
  352. /*
  353. * Called from subthreads, when their handle has done something
  354. * that they need the main thread to respond to. We append the
  355. * given list node to the end of the ready list, and set
  356. * ready_event to signal to the main thread that the ready list is
  357. * now non-empty.
  358. */
  359. EnterCriticalSection(ready_critsec);
  360. node->next = ready_head;
  361. node->prev = ready_head->prev;
  362. node->next->prev = node->prev->next = node;
  363. SetEvent(ready_event);
  364. LeaveCriticalSection(ready_critsec);
  365. }
  366. static void remove_from_ready_list(handle_list_node *node)
  367. {
  368. /*
  369. * Called from the main thread, just before destroying a 'struct
  370. * handle' completely: as a precaution, we make absolutely sure
  371. * it's not linked on the ready list, just in case somehow it
  372. * still was.
  373. */
  374. EnterCriticalSection(ready_critsec);
  375. node->next->prev = node->prev;
  376. node->prev->next = node->next;
  377. node->next = node->prev = node;
  378. LeaveCriticalSection(ready_critsec);
  379. }
static void handle_ready(struct handle *h);  /* process one handle (below) */

static void handle_ready_callback(void *vctx)
{
    /*
     * Called when the main thread detects ready_event, indicating
     * that at least one handle is on the ready list. We empty the
     * whole list and process the handles one by one.
     *
     * It's possible that other handles may be destroyed, and hence
     * taken _off_ the ready list, during this processing. That
     * shouldn't cause a deadlock, because according to the API docs,
     * it's safe to call EnterCriticalSection twice in the same thread
     * - the second call will return immediately because that thread
     * already owns the critsec. (And then it takes two calls to
     * LeaveCriticalSection to release it again, which is just what we
     * want here.)
     */
    EnterCriticalSection(ready_critsec);
    while (ready_head->next != ready_head) {
        /* Unlink the first node and reset it to the self-linked
         * "not on a list" state before processing it. */
        handle_list_node *node = ready_head->next;
        node->prev->next = node->next;
        node->next->prev = node->prev;
        node->next = node->prev = node;
        /* ready_node sits at the same offset in all union members,
         * so recovering the handle via u.g is valid either way. */
        handle_ready(container_of(node, struct handle, u.g.ready_node));
    }
    LeaveCriticalSection(ready_critsec);
}
  407. static inline void ensure_ready_event_setup(void)
  408. {
  409. if (ready_event == INVALID_HANDLE_VALUE) {
  410. ready_head->prev = ready_head->next = ready_head;
  411. InitializeCriticalSection(ready_critsec);
  412. ready_event = CreateEvent(NULL, false, false, NULL);
  413. add_handle_wait(ready_event, handle_ready_callback, NULL);
  414. }
  415. }
  416. struct handle *handle_input_new(HANDLE handle, handle_inputfn_t gotdata,
  417. void *privdata, int flags)
  418. {
  419. struct handle *h = snew(struct handle);
  420. DWORD in_threadid; /* required for Win9x */
  421. h->type = HT_INPUT;
  422. h->u.i.h = handle;
  423. h->u.i.ev_from_main = CreateEvent(NULL, false, false, NULL);
  424. h->u.i.gotdata = gotdata;
  425. h->u.i.defunct = false;
  426. h->u.i.moribund = false;
  427. h->u.i.done = false;
  428. h->u.i.privdata = privdata;
  429. h->u.i.flags = flags;
  430. ensure_ready_event_setup();
  431. HANDLE hThread = CreateThread(NULL, 0, handle_input_threadfunc,
  432. &h->u.i, 0, &in_threadid);
  433. if (hThread)
  434. CloseHandle(hThread); /* we don't need the thread handle */
  435. h->u.i.busy = true;
  436. return h;
  437. }
  438. struct handle *handle_output_new(HANDLE handle, handle_outputfn_t sentdata,
  439. void *privdata, int flags)
  440. {
  441. struct handle *h = snew(struct handle);
  442. DWORD out_threadid; /* required for Win9x */
  443. h->type = HT_OUTPUT;
  444. h->u.o.h = handle;
  445. h->u.o.ev_from_main = CreateEvent(NULL, false, false, NULL);
  446. h->u.o.busy = false;
  447. h->u.o.defunct = false;
  448. h->u.o.moribund = false;
  449. h->u.o.done = false;
  450. h->u.o.privdata = privdata;
  451. bufchain_init(&h->u.o.queued_data);
  452. h->u.o.outgoingeof = EOF_NO;
  453. h->u.o.sentdata = sentdata;
  454. h->u.o.sentdata_param = h;
  455. h->u.o.flags = flags;
  456. ensure_ready_event_setup();
  457. HANDLE hThread = CreateThread(NULL, 0, handle_output_threadfunc,
  458. &h->u.o, 0, &out_threadid);
  459. if (hThread)
  460. CloseHandle(hThread); /* we don't need the thread handle */
  461. return h;
  462. }
  463. size_t handle_write(struct handle *h, const void *data, size_t len)
  464. {
  465. assert(h->type == HT_OUTPUT);
  466. assert(h->u.o.outgoingeof == EOF_NO);
  467. bufchain_add(&h->u.o.queued_data, data, len);
  468. handle_try_output(&h->u.o);
  469. return bufchain_size(&h->u.o.queued_data);
  470. }
  471. void handle_write_eof(struct handle *h)
  472. {
  473. /*
  474. * This function is called when we want to proactively send an
  475. * end-of-file notification on the handle. We can only do this by
  476. * actually closing the handle - so never call this on a
  477. * bidirectional handle if we're still interested in its incoming
  478. * direction!
  479. */
  480. assert(h->type == HT_OUTPUT);
  481. if (h->u.o.outgoingeof == EOF_NO) {
  482. h->u.o.outgoingeof = EOF_PENDING;
  483. handle_try_output(&h->u.o);
  484. }
  485. }
  486. static void handle_destroy(struct handle *h)
  487. {
  488. if (h->type == HT_OUTPUT)
  489. bufchain_clear(&h->u.o.queued_data);
  490. CloseHandle(h->u.g.ev_from_main);
  491. remove_from_ready_list(&h->u.g.ready_node);
  492. sfree(h);
  493. }
/*
 * Client-facing destructor. Because a subthread may still be using
 * the structure, we can't always free immediately: instead we drive
 * the moribund/done/busy state machine, and handle_ready() finishes
 * the destruction when the subthread's final signal arrives.
 */
void handle_free(struct handle *h)
{
    assert(h && !h->u.g.moribund);
    if (h->u.g.busy) {
        /*
         * If the handle is currently busy, we cannot immediately free
         * it, because its subthread is in the middle of something.
         * (Exception: foreign handles don't have a subthread.)
         * [NOTE(review): no foreign-handle path is visible in this
         * file - presumably handled elsewhere or vestigial; confirm.]
         *
         * Instead we must wait until it's finished its current
         * operation, because otherwise the subthread will write to
         * invalid memory after we free its context from under it. So
         * we set the moribund flag, which will be noticed next time
         * an operation completes.
         */
        h->u.g.moribund = true;
    } else if (h->u.g.defunct) {
        /*
         * There isn't even a subthread; we can go straight to
         * handle_destroy.
         */
        handle_destroy(h);
    } else {
        /*
         * The subthread is alive but not busy, so we now signal it
         * to die. Set the moribund flag to indicate that it will
         * want destroying after that.
         */
        h->u.g.moribund = true;
        h->u.g.done = true;
        h->u.g.busy = true;
        SetEvent(h->u.g.ev_from_main);
    }
}
/*
 * Process one handle taken off the ready list: either progress the
 * moribund-destruction state machine, or deliver the completed I/O
 * result to the client callback. Main thread only.
 */
static void handle_ready(struct handle *h)
{
    if (h->u.g.moribund) {
        /*
         * A moribund handle is one which we have either already
         * signalled to die, or are waiting until its current I/O op
         * completes to do so. Either way, it's treated as already
         * dead from the external user's point of view, so we ignore
         * the actual I/O result. We just signal the thread to die if
         * we haven't yet done so, or destroy the handle if not.
         */
        if (h->u.g.done) {
            /* We already sent the death signal; this event is the
             * subthread's acknowledgment, so it's safe to free. */
            handle_destroy(h);
        } else {
            h->u.g.done = true;
            h->u.g.busy = true;
            SetEvent(h->u.g.ev_from_main);
        }
        return;
    }

    switch (h->type) {
        int backlog;

      case HT_INPUT:
        h->u.i.busy = false;

        /*
         * A signal on an input handle means data has arrived.
         */
        if (h->u.i.len == 0) {
            /*
             * EOF, or (nearly equivalently) read error. Mark the
             * handle defunct _before_ the callback, since the
             * callback may free the handle via handle_free.
             */
            h->u.i.defunct = true;
            h->u.i.gotdata(h, NULL, 0, h->u.i.readerr);
        } else {
            /* Deliver the data; the callback returns the client's
             * current backlog, which decides whether to read more. */
            backlog = h->u.i.gotdata(h, h->u.i.buffer, h->u.i.len, 0);
            handle_throttle(&h->u.i, backlog);
        }
        break;

      case HT_OUTPUT:
        h->u.o.busy = false;

        /*
         * A signal on an output handle means we have completed a
         * write. Call the callback to indicate that the output
         * buffer size has decreased, or to indicate an error.
         */
        if (h->u.o.writeerr) {
            /*
             * Write error. Send a negative value to the callback,
             * and mark the thread as defunct (because the output
             * thread is terminating by now).
             */
            h->u.o.defunct = true;
            h->u.o.sentdata(h, 0, h->u.o.writeerr, false);
        } else {
            /* Drop the written bytes from the queue and try to
             * start the next write (or deliver a pending EOF). */
            bufchain_consume(&h->u.o.queued_data, h->u.o.lenwritten);
            noise_ultralight(NOISE_SOURCE_IOLEN, h->u.o.lenwritten);
            h->u.o.sentdata(h, bufchain_size(&h->u.o.queued_data), 0, false);
            handle_try_output(&h->u.o);
        }
        break;
    }
}
/*
 * Client-facing throttle update: report the current backlog on an
 * input handle, possibly restarting reads if it has dropped below
 * MAX_BACKLOG.
 */
void handle_unthrottle(struct handle *h, size_t backlog)
{
    assert(h->type == HT_INPUT);
    handle_throttle(&h->u.i, backlog);
}

/*
 * Return the number of bytes queued but not yet written on an
 * output handle.
 */
size_t handle_backlog(struct handle *h)
{
    assert(h->type == HT_OUTPUT);
    return bufchain_size(&h->u.o.queued_data);
}

/*
 * Retrieve the opaque client pointer supplied at creation time.
 * Valid for either direction (privdata is a common field).
 */
void *handle_get_privdata(struct handle *h)
{
    return h->u.g.privdata;
}
  604. static void handle_sink_write(BinarySink *bs, const void *data, size_t len)
  605. {
  606. handle_sink *sink = BinarySink_DOWNCAST(bs, handle_sink);
  607. handle_write(sink->h, data, len);
  608. }
/*
 * Initialise a handle_sink so that BinarySink writes into it are
 * forwarded to the output handle 'h' via handle_sink_write.
 */
void handle_sink_init(handle_sink *sink, struct handle *h)
{
    sink->h = h;
    BinarySink_INIT(sink, handle_sink_write);
}