/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <asyncio/AsyncIO.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <thread> // for std::thread (used by sendEvent)
#include <unistd.h>

#include "PosixAsyncIO.h"
#include "MtpDescriptors.h"
#include "MtpFfsHandle.h"
#include "mtp.h"
#include "MtpDebug.h"

namespace {

constexpr unsigned AIO_BUFS_MAX = 128;
constexpr unsigned AIO_BUF_LEN = 16384;

constexpr unsigned FFS_NUM_EVENTS = 5;

constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;

constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
// Note: a POLL_TIMEOUT_MS of 0 makes poll() return immediately (no sleep),
// which drives CPU usage up, so use a modest timeout instead.
constexpr int32_t POLL_TIMEOUT_MS = 500;

struct timespec ZERO_TIMEOUT = { 0, 0 };

struct mtp_device_status {
    uint16_t wLength;
    uint16_t wCode;
};

} // anonymous namespace

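// Query the FunctionFS endpoint descriptor for the endpoint's wMaxPacketSize;
// fall back to the high-speed maximum if the ioctl fails.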
int MtpFfsHandle::getPacketSize(int ffs_fd) {
    struct usb_endpoint_descriptor desc;
    if (ioctl(ffs_fd, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&desc))) {
        MTPE("Could not get FFS bulk-in descriptor\n");
        return MAX_PACKET_SIZE_HS;
    } else {
        return desc.wMaxPacketSize;
    }
}

MtpFfsHandle::MtpFfsHandle(int controlFd) {
    mControl.reset(controlFd);
    mBatchCancel = android::base::GetBoolProperty("sys.usb.mtp.batchcancel", false);
}

MtpFfsHandle::~MtpFfsHandle() {}

void MtpFfsHandle::closeEndpoints() {
    mIntr.reset();
    mBulkIn.reset();
    mBulkOut.reset();
}

bool MtpFfsHandle::openEndpoints(bool ptp) {
    if (mBulkIn < 0) {
        mBulkIn.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN, O_RDWR)));
        if (mBulkIn < 0) {
            MTPE("cannot open bulk in ep\n");
            return false;
        }
    }

    if (mBulkOut < 0) {
        mBulkOut.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT, O_RDWR)));
        if (mBulkOut < 0) {
            MTPE("cannot open bulk out ep\n");
            return false;
        }
    }

    if (mIntr < 0) {
        mIntr.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR, O_RDWR)));
        if (mIntr < 0) {
            MTPE("cannot open intr ep\n");
            return false;
        }
    }
    return true;
}

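// Hint the kernel about upcoming sequential I/O on both the staging buffers
// (posix_madvise) and the backing file (posix_fadvise) so read-ahead works
// in our favor.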
void MtpFfsHandle::advise(int fd) {
    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        if (posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
                POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED) != 0)
            MTPE("Failed to madvise\n");
    }
    if (posix_fadvise(fd, 0, 0,
            POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED) != 0)
        MTPE("Failed to fadvise\n");
}

bool MtpFfsHandle::writeDescriptors(bool ptp) {
    return ::writeDescriptors(mControl, ptp);
}

void MtpFfsHandle::closeConfig() {
    mControl.reset();
}

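// Perform a synchronous bulk transfer on top of the AIO machinery: split the
// caller's buffer into AIO_BUF_LEN-sized iocbs, submit them to the bulk
// endpoint, and wait for completion. When zero_packet is set and the length
// is an exact multiple of the endpoint packet size, a zero-length packet is
// sent to terminate the transfer.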
int MtpFfsHandle::doAsync(void* data, size_t len, bool read, bool zero_packet) {
    struct io_event ioevs[AIO_BUFS_MAX];
    size_t total = 0;

    while (total < len) {
        size_t this_len = std::min(len - total, static_cast<size_t>(AIO_BUF_LEN * AIO_BUFS_MAX));
        int num_bufs = this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
        for (int i = 0; i < num_bufs; i++) {
            mIobuf[0].buf[i] = reinterpret_cast<unsigned char*>(data) + total + i * AIO_BUF_LEN;
        }
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, this_len, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
        total += ret;
        if (static_cast<size_t>(ret) < this_len) break;
    }

    int packet_size = getPacketSize(read ? mBulkOut : mBulkIn);
    if (len % packet_size == 0 && zero_packet) {
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, 0, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
    }

    for (unsigned i = 0; i < AIO_BUFS_MAX; i++) {
        mIobuf[0].buf[i] = mIobuf[0].bufs.data() + i * AIO_BUF_LEN;
    }
    return total;
}

int MtpFfsHandle::read(void* data, size_t len) {
    // Zero packets are handled by receiveFile()
    return doAsync(data, len, true, false);
}

int MtpFfsHandle::write(const void* data, size_t len) {
    return doAsync(const_cast<void*>(data), len, false, true);
}

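// Drain pending FunctionFS events from the control fd and react to them:
// BIND/ENABLE clear any error state, UNBIND/DISABLE shut the session down
// (ESHUTDOWN), and SETUP packets are forwarded to handleControlRequest().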
int MtpFfsHandle::handleEvent() {

    std::vector<usb_functionfs_event> events(FFS_NUM_EVENTS);
    usb_functionfs_event *event = events.data();
    int nbytes = TEMP_FAILURE_RETRY(::read(mControl, event,
            events.size() * sizeof(usb_functionfs_event)));
    if (nbytes == -1) {
        return -1;
    }
    int ret = 0;
    for (size_t n = nbytes / sizeof *event; n; --n, ++event) {
        switch (event->type) {
        case FUNCTIONFS_BIND:
        case FUNCTIONFS_ENABLE:
            ret = 0;
            errno = 0;
            break;
        case FUNCTIONFS_UNBIND:
        case FUNCTIONFS_DISABLE:
            errno = ESHUTDOWN;
            ret = -1;
            break;
        case FUNCTIONFS_SETUP:
            if (handleControlRequest(&event->u.setup) == -1)
                ret = -1;
            break;
        case FUNCTIONFS_SUSPEND:
        case FUNCTIONFS_RESUME:
            break;
        default:
            MTPE("Mtp Event (unknown)\n");
        }
    }
    return ret;
}

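// Handle an MTP class-specific control request delivered via FUNCTIONFS_SETUP.
// MTP_REQ_CANCEL/RESET set errno to ECANCELED so the caller aborts the
// transfer; MTP_REQ_GET_DEVICE_STATUS replies with OK, or with
// TRANSACTION_CANCELLED plus the endpoints that were stalled.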
int MtpFfsHandle::handleControlRequest(const struct usb_ctrlrequest *setup) {
    uint8_t type = setup->bRequestType;
    uint8_t code = setup->bRequest;
    uint16_t length = setup->wLength;
    uint16_t index = setup->wIndex;
    uint16_t value = setup->wValue;
    std::vector<char> buf;
    buf.resize(length);
    int ret = 0;

    if (!(type & USB_DIR_IN)) {
        if (::read(mControl, buf.data(), length) != length) {
            MTPE("Mtp error ctrlreq read data");
        }
    }

    if ((type & USB_TYPE_MASK) == USB_TYPE_CLASS && index == 0 && value == 0) {
        switch(code) {
        case MTP_REQ_RESET:
        case MTP_REQ_CANCEL:
            errno = ECANCELED;
            ret = -1;
            break;
        case MTP_REQ_GET_DEVICE_STATUS:
        {
            if (length < sizeof(struct mtp_device_status) + 4) {
                errno = EINVAL;
                return -1;
            }
            struct mtp_device_status *st = reinterpret_cast<struct mtp_device_status*>(buf.data());
            st->wLength = htole16(sizeof(*st)); // size of the status struct, not of the pointer
            if (mCanceled) {
                st->wLength += 4;
                st->wCode = MTP_RESPONSE_TRANSACTION_CANCELLED;
                uint16_t *endpoints = reinterpret_cast<uint16_t*>(st + 1);
                endpoints[0] = ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_REVMAP);
                endpoints[1] = ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_REVMAP);
                mCanceled = false;
            } else {
                st->wCode = MTP_RESPONSE_OK;
            }
            length = st->wLength;
            break;
        }
        default:
            MTPE("Unrecognized Mtp class request!\n");
        }
    } else {
        MTPE("Unrecognized request type\n");
    }

    if (type & USB_DIR_IN) {
        if (::write(mControl, buf.data(), length) != length) {
            MTPE("Mtp error ctrlreq write data");
        }
    }
    return 0;
}

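// Allocate the per-direction I/O buffers, set up the AIO context and eventfd,
// and register the control fd and eventfd with poll(). A rough lifecycle for
// this handle (a sketch, not spelled out in this file): writeDescriptors()
// during configuration, start() once the function is enabled, then
// read()/write()/sendFile()/receiveFile() for the session, and close() at
// the end.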
int MtpFfsHandle::start(bool ptp) {
    if (!openEndpoints(ptp))
        return -1;

    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
        mIobuf[i].iocb.resize(AIO_BUFS_MAX);
        mIobuf[i].iocbs.resize(AIO_BUFS_MAX);
        mIobuf[i].buf.resize(AIO_BUFS_MAX);
        for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
            mIobuf[i].buf[j] = mIobuf[i].bufs.data() + j * AIO_BUF_LEN;
            mIobuf[i].iocb[j] = &mIobuf[i].iocbs[j];
        }
    }

    memset(&mCtx, 0, sizeof(mCtx));
    if (io_setup(AIO_BUFS_MAX, &mCtx) < 0) {
        MTPE("unable to setup aio");
        return -1;
    }
    mEventFd.reset(eventfd(0, EFD_NONBLOCK));
    mPollFds[0].fd = mControl;
    mPollFds[0].events = POLLIN;
    mPollFds[1].fd = mEventFd;
    mPollFds[1].events = POLLIN;

    mCanceled = false;
    return 0;
}

void MtpFfsHandle::close() {
    io_destroy(mCtx);
    closeEndpoints();
    closeConfig();
}

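// Block until at least min_events AIO completions have been reaped. poll()
// multiplexes the FFS control fd (endpoint events) with the eventfd that the
// kernel signals on iocb completion; the completed events' byte counts are
// summed into the value returned to the caller, or -1 on error.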
int MtpFfsHandle::waitEvents(__attribute__((unused)) struct io_buffer *buf, int min_events,
        struct io_event *events, int *counter) {
    int num_events = 0;
    int ret = 0;
    int error = 0;

    while (num_events < min_events) {
        if (poll(mPollFds, 2, POLL_TIMEOUT_MS) == -1) {
            MTPE("Mtp error during poll()\n");
            return -1;
        }
        if (mPollFds[0].revents & POLLIN) {
            mPollFds[0].revents = 0;
            if (handleEvent() == -1) {
                error = errno;
            }
        }
        if (mPollFds[1].revents & POLLIN) {
            mPollFds[1].revents = 0;
            uint64_t ev_cnt = 0;

            if (::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1) {
                MTPE("Mtp unable to read eventfd\n");
                error = errno;
                continue;
            }

            // It's possible that io_getevents will return more events than the eventFd reported,
            // since events may appear in the time between the calls. In this case, the eventFd will
            // show up as readable next iteration, but there will be fewer or no events to actually
            // wait for. Thus we never want io_getevents to block.
            int this_events = TEMP_FAILURE_RETRY(io_getevents(mCtx, 0, AIO_BUFS_MAX, events, &ZERO_TIMEOUT));
            if (this_events == -1) {
                MTPE("Mtp error getting events");
                error = errno;
            }
            // Add up the total amount of data and find errors on the way.
            for (unsigned j = 0; j < static_cast<unsigned>(this_events); j++) {
                if (events[j].res < 0) {
                    errno = -events[j].res;
                    MTPE("Mtp got error event\n");
                    error = errno;
                }
                ret += events[j].res;
            }
            num_events += this_events;
            if (counter)
                *counter += this_events;
        }
        if (error) {
            errno = error;
            ret = -1;
            break;
        }
    }
    return ret;
}

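// Cancel the in-progress transaction from the device side by stalling both
// bulk endpoints: an I/O in the "wrong" direction on a FunctionFS endpoint
// (read on bulk-in, write on bulk-out) halts it, and success shows up as
// -1 with errno == EBADMSG.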
void MtpFfsHandle::cancelTransaction() {
    // Device cancels by stalling both bulk endpoints.
    if (::read(mBulkIn, nullptr, 0) != -1 || errno != EBADMSG)
        MTPE("Mtp stall failed on bulk in\n");
    if (::write(mBulkOut, nullptr, 0) != -1 || errno != EBADMSG)
        MTPE("Mtp stall failed on bulk out\n");
    mCanceled = true;
    errno = ECANCELED;
}

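// Cancel the submitted-but-unfinished iocbs in [start, end) and reap their
// completions so the AIO context is left clean. With is_batch_cancel set, a
// single successful io_cancel is treated as cancelling the whole batch.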
int MtpFfsHandle::cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start,
        unsigned end, bool is_batch_cancel) {
    // Some manpages for io_cancel are out of date and incorrect.
    // io_cancel will return -EINPROGRESS on success and does
    // not place the event in the given memory. We have to use
    // io_getevents to wait for all the events we cancelled.
    int ret = 0;
    unsigned num_events = 0;
    int save_errno = errno;
    errno = 0;

    for (unsigned j = start; j < end; j++) {
        if (io_cancel(mCtx, iocb[j], nullptr) != -1 || errno != EINPROGRESS) {
            MTPE("Mtp couldn't cancel request\n");
        } else {
            num_events++;
        }
        if (is_batch_cancel && num_events == 1) {
            num_events = end - start;
            break;
        }
    }
    if (num_events != end - start) {
        ret = -1;
        errno = EIO;
    }
    int evs = TEMP_FAILURE_RETRY(io_getevents(mCtx, num_events, AIO_BUFS_MAX, events, nullptr));
    if (static_cast<unsigned>(evs) != num_events) {
        MTPE("Mtp couldn't cancel all requests\n");
        ret = -1;
    }

    uint64_t ev_cnt = 0;
    if (num_events && ::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1)
        MTPE("Mtp Unable to read event fd\n");

    if (ret == 0) {
        // Restore errno since it probably got overridden with EINPROGRESS.
        errno = save_errno;
    }
    return ret;
}

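// Fill the iocb table for one chunk (up to AIO_BUFS_MAX * AIO_BUF_LEN bytes)
// and submit it. Each iocb signals mEventFd on completion via IOCB_FLAG_RESFD.
// Returns the number of iocbs submitted, or -1 on error.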
int MtpFfsHandle::iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read) {
    int ret = 0;
    buf->actual = AIO_BUFS_MAX;
    for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
        unsigned rq_length = std::min(AIO_BUF_LEN, length - AIO_BUF_LEN * j);
        io_prep(buf->iocb[j], fd, buf->buf[j], rq_length, 0, read);
        buf->iocb[j]->aio_flags |= IOCB_FLAG_RESFD;
        buf->iocb[j]->aio_resfd = mEventFd;

        // Not enough data, so table is truncated.
        if (rq_length < AIO_BUF_LEN || length == AIO_BUF_LEN * (j + 1)) {
            buf->actual = j + 1;
            break;
        }
    }

    ret = io_submit(mCtx, buf->actual, buf->iocb.data());
    if (ret != static_cast<int>(buf->actual)) {
        MTPE("Mtp io_submit\n");
        if (ret != -1) {
            errno = EIO;
        }
        ret = -1;
    }
    return ret;
}

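// Receive a file from the host and write it to mfr.fd. Reads from USB and
// writes to disk are double-buffered: while one io_buffer is being filled by
// AIO reads from the bulk-out endpoint, the previous one is flushed to disk
// with POSIX aio_write(). For transfers whose length is reported as
// 0xFFFFFFFF (>= 4G), a short packet marks the end of the data.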
int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
    // When receiving files, the incoming length is given in 32 bits.
    // A >=4G file is given as 0xFFFFFFFF
    uint32_t file_length = mfr.length;
    uint64_t offset = mfr.offset;

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    aio.aio_buf = nullptr;
    struct aiocb *aiol[] = {&aio};

    int ret = -1;
    unsigned i = 0;
    size_t length;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool has_write = false;
    bool error = false;
    bool write_error = false;
    int packet_size = getPacketSize(mBulkOut);
    bool short_packet = false;
    advise(mfr.fd);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        // Queue an asynchronous read from USB.
        if (file_length > 0) {
            length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
            if (iobufSubmit(&mIobuf[i], mBulkOut, length, true) == -1)
                error = true;
        }

        // Get the return status of the last write request.
        if (has_write) {
            aio_suspend(aiol, 1, nullptr);
            int written = aio_return(&aio);
            if (static_cast<size_t>(written) < aio.aio_nbytes) {
                errno = written == -1 ? aio_error(&aio) : EIO;
                MTPE("Mtp error writing to disk\n");
                write_error = true;
            }
            has_write = false;
        }

        if (error) {
            return -1;
        }

        // Get the result of the read request, and queue a write to disk.
        if (file_length > 0) {
            unsigned num_events = 0;
            ret = 0;
            unsigned short_i = mIobuf[i].actual;
            while (num_events < short_i) {
                // Get all events up to the short read, if there is one.
                // We must wait for each event since data transfer could end at any time.
                int this_events = 0;
                int event_ret = waitEvents(&mIobuf[i], 1, ioevs, &this_events);
                num_events += this_events;

                if (event_ret == -1) {
                    cancelEvents(mIobuf[i].iocb.data(), ioevs, num_events, mIobuf[i].actual,
                            mBatchCancel);
                    return -1;
                }
                ret += event_ret;
                for (int j = 0; j < this_events; j++) {
                    // struct io_event contains a pointer to the associated struct iocb as a __u64.
                    if (static_cast<__u64>(ioevs[j].res) <
                            reinterpret_cast<struct iocb*>(ioevs[j].obj)->aio_nbytes) {
                        // We've found a short event. Store the index since
                        // events won't necessarily arrive in the order they are queued.
                        short_i = (ioevs[j].obj - reinterpret_cast<uint64_t>(mIobuf[i].iocbs.data()))
                                / sizeof(struct iocb) + 1;
                        short_packet = true;
                    }
                }
            }
            if (short_packet) {
                if (cancelEvents(mIobuf[i].iocb.data(), ioevs, short_i, mIobuf[i].actual,
                        mBatchCancel)) {
                    write_error = true;
                }
            }
            if (file_length == MAX_MTP_FILE_SIZE) {
                // For larger files, receive until a short packet is received.
                if (static_cast<size_t>(ret) < length) {
                    file_length = 0;
                }
            } else if (ret < static_cast<int>(length)) {
                // If file is less than 4G and we get a short packet, it's an error.
                errno = EIO;
                MTPE("Mtp got unexpected short packet\n");
                return -1;
            } else {
                file_length -= ret;
            }

            if (write_error) {
                cancelTransaction();
                return -1;
            }

            // Enqueue a new write request
            aio_prepare(&aio, mIobuf[i].bufs.data(), ret, offset);
            aio_write(&aio);

            offset += ret;
            i = (i + 1) % NUM_IO_BUFS;
            has_write = true;
        }
    }
    if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
        // Receive an empty packet if size is a multiple of the endpoint size
        // and we didn't already get an empty packet from the header or large file.
        if (read(mIobuf[0].bufs.data(), packet_size) != 0) {
            return -1;
        }
    }
    return 0;
}

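// Send a file to the host. The MTP data header is packed into the first
// packet together with the beginning of the file (some hosts do not accept
// header and data in separate transfers); the rest is streamed with POSIX
// aio_read() from disk overlapped against AIO writes to the bulk-in endpoint.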
int MtpFfsHandle::sendFile(mtp_file_range mfr) {
    uint64_t file_length = mfr.length;
    uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
            file_length + sizeof(mtp_data_header));
    uint64_t offset = mfr.offset;
    int packet_size = getPacketSize(mBulkIn);

    // If file_length is larger than a size_t, truncating would produce the wrong comparison.
    // Instead, promote the left side to 64 bits, then truncate the small result.
    int init_read_len = std::min(
            static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);

    advise(mfr.fd);

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    struct aiocb *aiol[] = {&aio};
    int ret = 0;
    int length, num_read;
    unsigned i = 0;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool error = false;
    bool has_write = false;

    // Send the header data
    mtp_data_header *header = reinterpret_cast<mtp_data_header*>(mIobuf[0].bufs.data());
    header->length = htole32(given_length);
    header->type = htole16(2); // data packet
    header->command = htole16(mfr.command);
    header->transaction_id = htole32(mfr.transaction_id);

    // Some hosts don't support header/data separation even though MTP allows it
    // Handle by filling first packet with initial file data
    if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
                    sizeof(mtp_data_header), init_read_len, offset))
            != init_read_len) return -1;
    if (doAsync(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len,
            false, false /* zlps are handled below */) == -1)
        return -1;
    file_length -= init_read_len;
    offset += init_read_len;
    ret = init_read_len + sizeof(mtp_data_header);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        if (file_length > 0) {
            // Queue up a read from disk.
            length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
            aio_prepare(&aio, mIobuf[i].bufs.data(), length, offset);
            aio_read(&aio);
        }

        if (has_write) {
            // Wait for usb write. Cancel unwritten portion if there's an error.
            int num_events = 0;
            if (waitEvents(&mIobuf[(i-1)%NUM_IO_BUFS], mIobuf[(i-1)%NUM_IO_BUFS].actual, ioevs,
                    &num_events) != ret) {
                error = true;
                cancelEvents(mIobuf[(i-1)%NUM_IO_BUFS].iocb.data(), ioevs, num_events,
                        mIobuf[(i-1)%NUM_IO_BUFS].actual, false);
            }
            has_write = false;
        }

        if (file_length > 0) {
            // Wait for the previous read to finish
            aio_suspend(aiol, 1, nullptr);
            num_read = aio_return(&aio);
            if (static_cast<size_t>(num_read) < aio.aio_nbytes) {
                errno = num_read == -1 ? aio_error(&aio) : EIO;
                MTPE("Mtp error reading from disk\n");
                cancelTransaction();
                return -1;
            }

            file_length -= num_read;
            offset += num_read;

            if (error) {
                return -1;
            }

            // Queue up a write to usb.
            if (iobufSubmit(&mIobuf[i], mBulkIn, num_read, false) == -1) {
                return -1;
            }
            has_write = true;
            ret = num_read;
        }

        i = (i + 1) % NUM_IO_BUFS;
    }

    if (ret % packet_size == 0) {
        // If the last packet wasn't short, send a final empty packet
        if (write(mIobuf[0].bufs.data(), 0) != 0) {
            return -1;
        }
    }
    return 0;
}

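// Queue an MTP event for delivery on the interrupt endpoint. The event
// payload is copied and written from a detached thread so the caller never
// blocks; delivery failures are only logged, mirroring the f_mtp driver.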
int MtpFfsHandle::sendEvent(mtp_event me) {
    // Mimic the behavior of f_mtp by sending the event async.
    // Events aren't critical to the connection, so we don't need to check the return value.
    char *temp = new char[me.length];
    memcpy(temp, me.data, me.length);
    me.data = temp;
    std::thread t([this, me]() { return this->doSendEvent(me); });
    t.detach();
    return 0;
}

void MtpFfsHandle::doSendEvent(mtp_event me) {
    unsigned length = me.length;
    int ret = ::write(mIntr, me.data, length);
    if (static_cast<unsigned>(ret) != length)
        MTPE("Mtp error sending event thread!\n");
    delete[] reinterpret_cast<char*>(me.data);
}