The RequestThread is started during the camera-open sequence. It manages the thread that submits capture requests to the HAL device.

frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp

// Start the request-queue thread during device initialization.
status_t Camera3Device::initialize(CameraModule *module)
{
    ...
    /** Start up request queue thread */
    mRequestThread = new RequestThread(this, mStatusTracker, device, aeLockAvailable);
    res = mRequestThread->run(String8::format("C3Dev-%d-ReqQueue", mId).string());
    ...
}

When run() is called on the RequestThread, the thread begins executing threadLoop(); the thread framework keeps invoking threadLoop() as long as it returns true, and stops once it returns false.



Core steps:

  1. Call waitForNextRequestBatch() to wait for the next batch of requests
  2. Call prepareHalRequests() to prepare a batch of HAL requests and output buffers
  3. Submit a batch of requests to HAL

frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp

bool Camera3Device::RequestThread::threadLoop() {
    ATRACE_CALL(a);status_t res;

    // Handle the paused state
    if (waitIfPaused()) {
        return true;
    }

    // Wait for the next batch of requests
    waitForNextRequestBatch(a);if (mNextRequests.size() = =0) {
        return true;
    }

    // Get the latest request ID (if any)
    int latestRequestId;
    camera_metadata_entry_t requestIdEntry = mNextRequests[mNextRequests.size() - 1].
            captureRequest->mSettings.find(ANDROID_REQUEST_ID);
    if (requestIdEntry.count > 0) {
        latestRequestId = requestIdEntry.data.i32[0];
    } else {
        ALOGW("%s: Did not have android.request.id set in the request.", __FUNCTION__);
        latestRequestId = NAME_NOT_FOUND;
    }

    // Prepare HAL request and output buffers
    res = prepareHalRequests(a);if (res == TIMED_OUT) {
        If the output buffer times out, this is not a fatal error
        cleanUpFailedRequests(/*sendRequestError*/ true);
        return true;
    } else if(res ! = OK) {cleanUpFailedRequests(/*sendRequestError*/ false);
        return false;
    }

    A new request / / notify waitUntilRequestProcessed thread ID
    {
        Mutex::Autolock al(mLatestRequestMutex);

        mLatestRequestId = latestRequestId;
        mLatestRequestSignal.signal(a); }// Submit a batch of requests to HAL.
    // The refresh lock is used only when multiple requests are submitted in batches.
    bool useFlushLock = mNextRequests.size(a) >1;

    if (useFlushLock) {
        mFlushLock.lock(a); }ALOGVV("%s: %d: submitting %d requests in a batch.", __FUNCTION__, __LINE__,
            mNextRequests.size());
    for (auto& nextRequest : mNextRequests) {
        // Submit the request and block until it is ready for the next one
        ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
        ATRACE_BEGIN("camera3->process_capture_request");
        res = mHal3Device->ops->process_capture_request(mHal3Device, &nextRequest.halRequest);
        ATRACE_END(a);if(res ! = OK) {// This should only fail for malformed requests or device-level errors, so consider that all errors are fatal.
            // Faulty metadata failures should be notified by notify.
            SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
                    " device: %s (%d)", nextRequest.halRequest.frame_number, strerror(-res),
                    res);
            cleanUpFailedRequests(/*sendRequestError*/ false);
            if (useFlushLock) {
                mFlushLock.unlock(a); }return false;
        }

        // Mark that the request was submitted successfully.
        nextRequest.submitted = true;

        // Update the latest request sent to HAL
        if(nextRequest.halRequest.settings ! =NULL) { // If it hasn't changed, don't update it
            Mutex::Autolock al(mLatestRequestMutex);

            camera_metadata_t* cloned = clone_camera_metadata(nextRequest.halRequest.settings);
            mLatestRequest.acquire(cloned);
        }

        if(nextRequest.halRequest.settings ! =NULL) {
            nextRequest.captureRequest->mSettings.unlock(nextRequest.halRequest.settings);
        }

        // Delete all previously queued triggers (after unlocking)
        res = removeTriggers(mPrevRequest);
        if(res ! = OK) {SET_ERR("RequestThread: Unable to remove triggers "
                  "(capture request %d, HAL device: %s (%d)",
                  nextRequest.halRequest.frame_number, strerror(-res), res);
            cleanUpFailedRequests(/*sendRequestError*/ false);
            if (useFlushLock) {
                mFlushLock.unlock(a); }return false; }}if (useFlushLock) {
        mFlushLock.unlock(a); }// Unset to the current request
    {
        Mutex::Autolock l(mRequestLock);
        mNextRequests.clear(a); }return true;
}
Copy the code

Wait for the next batch of requests and place them in mNextRequests; if the wait times out, mNextRequests is left empty. waitForNextRequestLocked() is called to retrieve a CaptureRequest, the NextRequest members are filled in, and the NextRequest is appended to mNextRequests. If the batch expects additional requests, waitForNextRequestLocked() is called again to retrieve each remaining CaptureRequest one by one, fill in a NextRequest, and append it to mNextRequests.

Camera3_capture_request_t structure:

A single request for image capture/buffer reprocessing, sent by the framework to the Camera HAL device in process_capture_request().

The request contains the Settings for this capture and a set of output buffers to which the generated image data is written. It can optionally include an input buffer, in which case the request is used to reprocess the input buffer rather than capture images taken by the new camera sensor. The capture is identified by frame_number.

In response, the camera HAL device must asynchronously send a camera3_capture_result structure back to the framework through the process_capture_result() callback.

frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp

// Wait for the next batch of requests and place them in mNextRequests.
// If the wait times out, mNextRequests is left empty.
void Camera3Device::RequestThread::waitForNextRequestBatch() {
    // Optimized a bit for the simple steady-state case (single repeating
    // request), to avoid putting that request in the queue temporarily.
    Mutex::Autolock l(mRequestLock);

    assert(mNextRequests.empty());

    NextRequest nextRequest;
    nextRequest.captureRequest = waitForNextRequestLocked();
    if (nextRequest.captureRequest == nullptr) {
        return;
    }

    nextRequest.halRequest = camera3_capture_request_t();
    nextRequest.submitted = false;
    mNextRequests.add(nextRequest);

    // Wait for additional requests to fill the batch.
    const size_t batchSize = nextRequest.captureRequest->mBatchSize;

    for (size_t i = 1; i < batchSize; i++) {
        NextRequest additionalRequest;
        additionalRequest.captureRequest = waitForNextRequestLocked();
        if (additionalRequest.captureRequest == nullptr) {
            break;
        }

        additionalRequest.halRequest = camera3_capture_request_t();
        additionalRequest.submitted = false;
        mNextRequests.add(additionalRequest);
    }

    if (mNextRequests.size() < batchSize) {
        // %zu: both arguments are size_t; %d would be undefined behavior here.
        ALOGE("RequestThread: only get %zu out of %zu requests. Skipping requests.",
                mNextRequests.size(), batchSize);
        cleanUpFailedRequests(/*sendRequestError*/true);
    }

    return;
}

Wait for a request, returning NULL if the wait times out. Must be called while holding mRequestLock. waitForNextRequestLocked() obtains the next CaptureRequest: it first checks mRepeatingRequests, takes its first element as nextRequest, and inserts the remaining elements into mRequestQueue. Subsequent calls to waitForNextRequestLocked() then take elements from mRequestQueue and assign them to nextRequest.

frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp

// Wait for the next single request; returns NULL on timeout.
// Must be called while holding mRequestLock.
sp<Camera3Device::CaptureRequest>
        Camera3Device::RequestThread::waitForNextRequestLocked() {
    status_t res;
    sp<CaptureRequest> nextRequest;

    while (mRequestQueue.empty()) {
        if (!mRepeatingRequests.empty()) {
            // Always atomically enqueue all requests from a repeating request,
            // to guarantee that complete sequences are captured by the
            // application.
            const RequestList &requests = mRepeatingRequests;
            RequestList::const_iterator firstRequest =
                    requests.begin();
            nextRequest = *firstRequest;
            mRequestQueue.insert(mRequestQueue.end(),
                    ++firstRequest,
                    requests.end());
            // No need to wait any longer

            mRepeatingLastFrameNumber = mFrameNumber + requests.size() - 1;

            break;
        }

        res = mRequestSignal.waitRelative(mRequestLock, kRequestTimeout);

        if ((mRequestQueue.empty() && mRepeatingRequests.empty()) ||
                exitPending()) {
            Mutex::Autolock pl(mPauseLock);
            if (mPaused == false) {
                ALOGV("%s: RequestThread: Going idle", __FUNCTION__);
                mPaused = true;
                // Let the tracker know
                sp<StatusTracker> statusTracker = mStatusTracker.promote();
                if (statusTracker != 0) {
                    statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
                }
            }
            // Stop waiting for now and let thread management happen
            return NULL;
        }
    }

    if (nextRequest == NULL) {
        // Don't have a repeating request already in hand, so the queue
        // must have an entry now.
        RequestList::iterator firstRequest =
                mRequestQueue.begin();
        nextRequest = *firstRequest;
        mRequestQueue.erase(firstRequest);
    }

    // In case we've been unpaused by setPaused clearing mDoPause, need to
    // update the internal pause state (capture/setRepeatingRequest unpause
    // directly).
    Mutex::Autolock pl(mPauseLock);
    if (mPaused) {
        ALOGV("%s: RequestThread: Unpaused", __FUNCTION__);
        sp<StatusTracker> statusTracker = mStatusTracker.promote();
        if (statusTracker != 0) {
            statusTracker->markComponentActive(mStatusId);
        }
    }
    mPaused = false;

    // Check if we've reconfigured since last time, and reset the preview
    // request if so. Can't use 'NULL request == repeat' across configure calls.
    if (mReconfigured) {
        mPrevRequest.clear();
        mReconfigured = false;
    }

    if (nextRequest != NULL) {
        nextRequest->mResultExtras.frameNumber = mFrameNumber++;
        nextRequest->mResultExtras.afTriggerId = mCurrentAfTriggerId;
        nextRequest->mResultExtras.precaptureTriggerId = mCurrentPreCaptureTriggerId;

        // Since RequestThread::clear() removes buffers from the input stream,
        // get the right buffer here before unlocking mRequestLock
        if (nextRequest->mInputStream != NULL) {
            res = nextRequest->mInputStream->getInputBuffer(&nextRequest->mInputBuffer);
            if (res != OK) {
                // Can't get an input buffer from the gralloc queue - this
                // could be due to a disconnected queue or other producer
                // misbehavior, so not a fatal error.
                ALOGE("%s: Can't get input buffer, skipping request:"
                        " %s (%d)", __FUNCTION__, strerror(-res), res);
                if (mListener != NULL) {
                    mListener->notifyError(
                            ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
                            nextRequest->mResultExtras);
                }
                return NULL;
            }
        }
    }

    handleAePrecaptureCancelRequest(nextRequest);

    return nextRequest;
}

Now what does the call prepareHalRequests() do to prepare a batch of HAL requests and output buffers?

HAL request and output buffers are prepared in mNextRequests. If any of the output buffers times out, TIMED_OUT is returned. If an error is returned, the caller should clear the pending request batch. The code logic for preparing the output buffer is a bit convoluted, which I’ll focus on below.

Camera3_stream_buffer_t structure:

A single buffer from the Camera3 stream. It includes a handle to the parent stream, a handle to the Gralloc buffer itself, and a synchronization fence. Buffers do not specify whether to use them for input or output; This depends on the type of parent stream and how the buffer is passed to the HAL device.

frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp

// Prepare HAL requests and output buffers for everything in mNextRequests.
// Returns TIMED_OUT if getting any output buffer times out; on error the
// caller should clean up the pending request batch.
status_t Camera3Device::RequestThread::prepareHalRequests() {
    ATRACE_CALL();

    for (auto& nextRequest : mNextRequests) {
        sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
        camera3_capture_request_t* halRequest = &nextRequest.halRequest;
        Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;

        // Prepare a request to HAL.
        halRequest->frame_number = captureRequest->mResultExtras.frameNumber;

        // Insert any queued triggers (before metadata is locked).
        status_t res = insertTriggers(captureRequest);

        if (res < 0) {
            SET_ERR("RequestThread: Unable to insert triggers "
                    "(capture request %d, HAL device: %s (%d)",
                    halRequest->frame_number, strerror(-res), res);
            return INVALID_OPERATION;
        }
        int triggerCount = res;
        bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
        mPrevTriggers = triggerCount;

        // If the request is different from last time, or we had triggers
        // mixed in last time, send the full settings to the HAL.
        if (mPrevRequest != captureRequest || triggersMixedIn) {
            /**
             * Insert dummy trigger IDs if a trigger is set but no trigger ID
             * is set.
             */
            res = addDummyTriggerIds(captureRequest);
            if (res != OK) {
                SET_ERR("RequestThread: Unable to insert dummy trigger IDs "
                        "(capture request %d, HAL device: %s (%d)",
                        halRequest->frame_number, strerror(-res), res);
                return INVALID_OPERATION;
            }

            /**
             * The request should be presorted so metadata accesses in the HAL
             * are cheap; sorting already-sorted metadata is a no-op.
             */
            captureRequest->mSettings.sort();
            halRequest->settings = captureRequest->mSettings.getAndLock();
            mPrevRequest = captureRequest;
            ALOGVV("%s: Request settings are NEW", __FUNCTION__);

            IF_ALOGV() {
                camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
                find_camera_metadata_ro_entry(
                        halRequest->settings,
                        ANDROID_CONTROL_AF_TRIGGER,
                        &e
                );
                if (e.count > 0) {
                    ALOGV("%s: Request (frame num %d) had AF trigger 0x%x",
                          __FUNCTION__,
                          halRequest->frame_number,
                          e.data.u8[0]);
                }
            }
        } else {
            // leave request.settings NULL to indicate 'reuse latest given'
            ALOGVV("%s: Request settings are REUSED",
                   __FUNCTION__);
        }

        uint32_t totalNumBuffers = 0;

        // Fill in buffers.
        if (captureRequest->mInputStream != NULL) {
            halRequest->input_buffer = &captureRequest->mInputBuffer;
            totalNumBuffers += 1;
        } else {
            halRequest->input_buffer = NULL;
        }

        outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
                captureRequest->mOutputStreams.size());
        halRequest->output_buffers = outputBuffers->array();
        for (size_t i = 0; i < captureRequest->mOutputStreams.size(); i++) {
            res = captureRequest->mOutputStreams.editItemAt(i)->
                    getBuffer(&outputBuffers->editItemAt(i));
            if (res != OK) {
                // Can't get an output buffer from the gralloc queue - this
                // could be due to an abandoned queue or other consumer
                // misbehavior, so not a fatal error.
                ALOGE("RequestThread: Can't get output buffer, skipping request:"
                        " %s (%d)", strerror(-res), res);

                return TIMED_OUT;
            }
            halRequest->num_output_buffers++;
        }
        totalNumBuffers += halRequest->num_output_buffers;

        // Log the request in the in-flight queue.
        sp<Camera3Device> parent = mParent.promote();
        if (parent == NULL) {
            // Should not happen, and nowhere to send errors to, so just log it.
            CLOGE("RequestThread: Parent is gone");
            return INVALID_OPERATION;
        }
        res = parent->registerInFlight(halRequest->frame_number,
                totalNumBuffers, captureRequest->mResultExtras,
                /*hasInput*/halRequest->input_buffer != NULL,
                captureRequest->mAeTriggerCancelOverride);
        ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
               ", burstId = %" PRId32 ".",
                __FUNCTION__,
                captureRequest->mResultExtras.requestId, captureRequest->mResultExtras.frameNumber,
                captureRequest->mResultExtras.burstId);
        if (res != OK) {
            SET_ERR("RequestThread: Unable to register new in-flight request:"
                    " %s (%d)", strerror(-res), res);
            return INVALID_OPERATION;
        }
    }

    return OK;
}

First, take a look at the CaptureRequest class, which is implemented in Camera3Device.

frameworks/av/services/camera/libcameraservice/device3/Camera3Device.h

class Camera3Device :
            public CameraDeviceBase,
            private camera3_callback_ops {
    ......
  private:
    // One capture request as tracked inside Camera3Device.
    class CaptureRequest : public LightRefBase<CaptureRequest> {
      public:
        CameraMetadata                      mSettings;
        sp<camera3::Camera3Stream>          mInputStream;
        camera3_stream_buffer_t             mInputBuffer;
        Vector<sp<camera3::Camera3OutputStreamInterface> >
                                            mOutputStreams;
        CaptureResultExtras                 mResultExtras;
        // Used to cancel AE precapture trigger for devices that don't
        // support CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL.
        AeTriggerCancelOverride_t           mAeTriggerCancelOverride;
        // The number of requests that should be submitted to HAL at a time.
        // For example, if the batch size is 8, this request and the following
        // 7 requests will be submitted to HAL at the same time. The batch
        // size for the following 7 requests will be ignored by the request
        // thread.
        int                                 mBatchSize;
    };
    ......
};

The CaptureRequest member mOutputStreams is a Vector; each element is a strong pointer (sp) to a camera3::Camera3OutputStreamInterface. Camera3OutputStreamInterface is only an interface. In Camera3Device::createStream(...), a Camera3OutputStream object (which manages a single output stream from the camera device) is created and added to the KeyedVector held in the Camera3Device member mOutputStreams. Later, Camera3Device::createCaptureRequest(...) takes the output streams from the Camera3Device member mOutputStreams and pushes them into the mOutputStreams vector of the CaptureRequest.

The Camera3OutputStreamInterface interface (which manages a single output data stream from the camera device) inherits from the Camera3StreamInterface interface.

frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h

// Interface for managing a single output data stream from the camera device.
class Camera3OutputStreamInterface : public virtual Camera3StreamInterface {
    ......
};

The Camera3StreamInterface interface manages a single input and/or output data stream from the camera device. It declares getBuffer(...) as a pure virtual function.

getBuffer(...) fills the camera3_stream_buffer with the next valid buffer of the stream to hand over to the HAL. This method may only be called after finishConfiguration has been called. For bidirectional streams, this method applies to the output-side buffers.

frameworks/av/services/camera/libcameraservice/device3/Camera3StreamInterface.h

// Interface for managing a single input and/or output data stream from the
// camera device. getBuffer() is declared as a pure virtual function.
class Camera3StreamInterface : public virtual RefBase {
    ......
    virtual status_t getBuffer(camera3_stream_buffer *buffer) = 0;
    ......
};

It is now clear that when RequestThread::prepareHalRequests() calls getBuffer(...) on the objects in the CaptureRequest member mOutputStreams, the concrete implementation invoked is getBuffer(...) of the Camera3OutputStream class.

frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStream.h

// Concrete output stream implementation; inherits Camera3Stream indirectly
// through Camera3IOStreamBase.
class Camera3OutputStream :
        public Camera3IOStreamBase,
        public Camera3OutputStreamInterface {
    ......
};

Looking for getBuffer(...) in the Camera3OutputStream class reveals that its implementation actually lives in the Camera3Stream class: Camera3OutputStream does not inherit directly from Camera3Stream, but from Camera3IOStreamBase, which in turn inherits from Camera3Stream.

frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStream.h

// Intermediate base class between Camera3Stream and the concrete streams.
class Camera3IOStreamBase :
        public Camera3Stream {
    ......
};



The function that actually obtains buffers is getBufferLocked(...), which is implemented in the Camera3OutputStream class.

frameworks/av/services/camera/libcameraservice/device3/Camera3Stream.cpp

// Fill in the next valid buffer for this stream to hand over to the HAL.
// Only valid after the stream has been configured.
status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
    ATRACE_CALL();
    Mutex::Autolock l(mLock);
    status_t res = OK;

    // This function should only be called when the stream is already
    // configured.
    if (mState != STATE_CONFIGURED) {
        ALOGE("%s: Stream %d: Can't get buffers if stream is not in CONFIGURED state %d",
                __FUNCTION__, mId, mState);
        return INVALID_OPERATION;
    }

    // If we are running into the limit, wait for a new buffer to be returned.
    if (getHandoutOutputBufferCountLocked() == camera3_stream::max_buffers) {
        ALOGV("%s: Already dequeued max output buffers (%d), wait for next returned one.",
                __FUNCTION__, camera3_stream::max_buffers);
        res = mOutputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
        if (res != OK) {
            if (res == TIMED_OUT) {
                ALOGE("%s: wait for output buffer return timed out after %lldms (max_buffers %d)",
                        __FUNCTION__, kWaitForBufferDuration / 1000000LL,
                        camera3_stream::max_buffers);
            }
            return res;
        }
    }

    // The function that actually obtains the buffer.
    res = getBufferLocked(buffer);
    if (res == OK) {
        // Fire the BufferListener callbacks.
        fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
    }

    return res;
}

mConsumer points to a Surface, which implements ANativeWindow.

frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStream.cpp

// Dequeue the next output buffer from the consumer (a Surface / ANativeWindow).
status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer) {
    ATRACE_CALL();
    status_t res;

    if ((res = getBufferPreconditionCheckLocked()) != OK) {
        return res;
    }

    ANativeWindowBuffer* anb;
    int fenceFd;

    /**
     * Release the lock briefly to avoid deadlock in the following scenario:
     * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
     * This thread acquired the StreamingProcessor lock and tries to lock the
     * Camera3Stream lock.
     * Thread 2: Camera3Stream::returnBuffer -> StreamingProcessor::onFrameAvailable().
     * This thread acquired the Camera3Stream lock and bufferQueue lock, and
     * tries to lock the StreamingProcessor lock.
     * Thread 3: Camera3Stream::getBuffer(). This thread acquired the
     * Camera3Stream lock and tries to lock the bufferQueue lock.
     * Then there is a circular locking dependency.
     */
    sp<ANativeWindow> currentConsumer = mConsumer;
    mLock.unlock();

    res = currentConsumer->dequeueBuffer(currentConsumer.get(), &anb, &fenceFd);
    mLock.lock();
    if (res != OK) {
        ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
                __FUNCTION__, mId, strerror(-res), res);
        return res;
    }

    /**
     * fenceFd is now owned by the HAL, except in case of error, in which
     * case we reassign it to acquire_fence.
     */
    handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
                        /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/true);

    return OK;
}

Let's take a look at the definition of the ANativeWindow structure, focusing on the dequeueBuffer function pointer: it is the hook EGL calls to acquire a buffer, and the call may block if no buffers are available.

system/core/include/system/window.h

struct ANativeWindow
{
#ifdef __cplusplus
    ANativeWindow()
        : flags(0), minSwapInterval(0), maxSwapInterval(0), xdpi(0), ydpi(0)
    {
        common.magic = ANDROID_NATIVE_WINDOW_MAGIC;
        common.version = sizeof(ANativeWindow);
        memset(common.reserved, 0.sizeof(common.reserved));
    }

    /* Implement the methods that sp
      
        expects so that it can be used to automatically refcount ANativeWindow's. */
      
    void incStrong(const void* /*id*/) const {
        common.incRef(const_cast<android_native_base_t*>(&common));
    }
    void decStrong(const void* /*id*/) const {
        common.decRef(const_cast<android_native_base_t*>(&common));
    }
#endif.int     (*dequeueBuffer)(struct ANativeWindow* window,
                struct ANativeWindowBuffer** buffer, int* fenceFd); . }Copy the code

The Surface class is an implementation of ANativeWindow that inputs the graphics buffer to the BufferQueue.

frameworks/native/include/gui/Surface.h

// Surface implements ANativeWindow and feeds graphics buffers into a
// BufferQueue.
class Surface
    : public ANativeObjectBase<ANativeWindow, Surface, RefBase>
{
    ......
};

The ANativeWindow function pointers are initialized in the Surface constructor; in particular, ANativeWindow::dequeueBuffer is assigned hook_dequeueBuffer.

frameworks/native/libs/gui/Surface.cpp

// The Surface constructor initializes the ANativeWindow function pointers;
// in particular, dequeueBuffer is assigned hook_dequeueBuffer.
Surface::Surface(
        const sp<IGraphicBufferProducer>& bufferProducer,
        bool controlledByApp)
    : mGraphicBufferProducer(bufferProducer),
      mGenerationNumber(0)
{
    ANativeWindow::setSwapInterval  = hook_setSwapInterval;
    ANativeWindow::dequeueBuffer    = hook_dequeueBuffer;
    ANativeWindow::cancelBuffer     = hook_cancelBuffer;
    ANativeWindow::queueBuffer      = hook_queueBuffer;
    ANativeWindow::query            = hook_query;
    ANativeWindow::perform          = hook_perform;

    ANativeWindow::dequeueBuffer_DEPRECATED = hook_dequeueBuffer_DEPRECATED;
    ANativeWindow::cancelBuffer_DEPRECATED  = hook_cancelBuffer_DEPRECATED;
    ANativeWindow::lockBuffer_DEPRECATED    = hook_lockBuffer_DEPRECATED;
    ANativeWindow::queueBuffer_DEPRECATED   = hook_queueBuffer_DEPRECATED;
    ......
}

hook_dequeueBuffer first converts the ANativeWindow pointer back to a Surface, then calls the Surface class's two-argument dequeueBuffer(...) member function.

frameworks/native/libs/gui/Surface.cpp

// Static trampoline installed into ANativeWindow::dequeueBuffer: recover the
// owning Surface from the window pointer and forward to the member function.
int Surface::hook_dequeueBuffer(ANativeWindow* window,
        ANativeWindowBuffer** buffer, int* fenceFd) {
    Surface* c = getSelf(window);
    return c->dequeueBuffer(buffer, fenceFd);
}

Surface::dequeueBuffer(...) first calls IGraphicBufferProducer::dequeueBuffer, and then obtains the buffer from the resulting GraphicBuffer.

frameworks/native/libs/gui/Surface.cpp

// Dequeue a buffer: ask IGraphicBufferProducer for a free slot, request
// (re)allocation if needed, and hand back the GraphicBuffer plus its fence.
int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
    ATRACE_CALL();
    ALOGV("Surface::dequeueBuffer");

    uint32_t reqWidth;
    uint32_t reqHeight;
    bool swapIntervalZero;
    PixelFormat reqFormat;
    uint32_t reqUsage;

    {
        Mutex::Autolock lock(mMutex);

        reqWidth = mReqWidth ? mReqWidth : mUserWidth;
        reqHeight = mReqHeight ? mReqHeight : mUserHeight;

        swapIntervalZero = mSwapIntervalZero;
        reqFormat = mReqFormat;
        reqUsage = mReqUsage;
    } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffer

    int buf = -1;
    sp<Fence> fence;
    status_t result = mGraphicBufferProducer->dequeueBuffer(&buf, &fence, swapIntervalZero,
            reqWidth, reqHeight, reqFormat, reqUsage);

    if (result < 0) {
        ALOGV("dequeueBuffer: IGraphicBufferProducer::dequeueBuffer(%d, %d, %d, %d, %d)"
             "failed: %d", swapIntervalZero, reqWidth, reqHeight, reqFormat,
             reqUsage, result);
        return result;
    }

    Mutex::Autolock lock(mMutex);

    sp<GraphicBuffer>& gbuf(mSlots[buf].buffer);

    // this should never happen
    ALOGE_IF(fence == NULL, "Surface::dequeueBuffer: received null Fence! buf=%d", buf);

    if (result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
        freeAllBuffers();
    }

    if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == 0) {
        result = mGraphicBufferProducer->requestBuffer(buf, &gbuf);
        if (result != NO_ERROR) {
            ALOGE("dequeueBuffer: IGraphicBufferProducer::requestBuffer failed: %d", result);
            mGraphicBufferProducer->cancelBuffer(buf, fence);
            return result;
        }
    }

    if (fence->isValid()) {
        *fenceFd = fence->dup();
        if (*fenceFd == -1) {
            ALOGE("dequeueBuffer: error duping fence: %d", errno);
            // dup() should never fail; something is badly wrong. Soldier on
            // and hope for the best; the worst that should happen is some
            // visible corruption that lasts until the next frame.
        }
    } else {
        *fenceFd = -1;
    }

    *buffer = gbuf.get();
    return OK;
}

mGraphicBufferProducer is initialized in the Surface constructor and actually points to a BpGraphicBufferProducer object. When dequeueBuffer(...) of the BpGraphicBufferProducer class is called, dequeueBuffer(...) of the remote BnGraphicBufferProducer responds.

frameworks/native/libs/gui/IGraphicBufferProducer.cpp

class BpGraphicBufferProducer : public BpInterface<IGraphicBufferProducer>
{
public:
    ...
    // Binder proxy side of dequeueBuffer: marshals the request parameters into
    // a Parcel, sends the DEQUEUE_BUFFER transaction to the remote
    // BnGraphicBufferProducer, and unmarshals the slot index and (optional)
    // fence from the reply.
    virtual status_t dequeueBuffer(int *buf, sp<Fence>* fence, bool async,
            uint32_t width, uint32_t height, PixelFormat format,
            uint32_t usage) {
        Parcel data, reply;
        data.writeInterfaceToken(IGraphicBufferProducer::getInterfaceDescriptor());
        data.writeInt32(static_cast<int32_t>(async));
        data.writeUint32(width);
        data.writeUint32(height);
        data.writeInt32(static_cast<int32_t>(format));
        data.writeUint32(usage);
        status_t result = remote()->transact(DEQUEUE_BUFFER, data, &reply);
        if (result != NO_ERROR) {
            return result;
        }
        *buf = reply.readInt32();
        // A non-zero flag in the reply indicates a Fence object follows.
        bool nonNull = reply.readInt32();
        if (nonNull) {
            *fence = new Fence();
            reply.read(**fence);
        }
        result = reply.readInt32();
        return result;
    }
    ...
};

BufferQueueProducer inherits from BnGraphicBufferProducer, so the remote implementation of dequeueBuffer(…) lives in BufferQueueProducer. A while loop calls waitForFreeSlotThenRelock(…) to search the slot cache until a usable free slot is found, from which the GraphicBuffer is obtained.

frameworks/native/libs/gui/BufferQueueProducer.cpp

// Dequeues a free buffer slot for the producer. Blocks (under mCore->mMutex)
// until a free slot is available, reallocates the slot's GraphicBuffer when
// the requested attributes no longer match, and returns the slot index plus
// the fence the producer must wait on before writing into the buffer.
// Returns a negative error code, or a non-negative bitmask of
// BUFFER_NEEDS_REALLOCATION / RELEASE_ALL_BUFFERS flags.
status_t BufferQueueProducer::dequeueBuffer(int *outSlot,
        sp<android::Fence> *outFence, bool async,
        uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
    ATRACE_CALL();
    { // Autolock scope
        Mutex::Autolock lock(mCore->mMutex);
        mConsumerName = mCore->mConsumerName;
    } // Autolock scope

    BQ_LOGV("dequeueBuffer: async=%s w=%u h=%u format=%#x, usage=%#x",
            async ? "true" : "false", width, height, format, usage);

    // Width and height must be specified together (or both omitted to use
    // the queue's defaults).
    if ((width && !height) || (!width && height)) {
        BQ_LOGE("dequeueBuffer: invalid size: w=%u h=%u", width, height);
        return BAD_VALUE;
    }

    status_t returnFlags = NO_ERROR;
    EGLDisplay eglDisplay = EGL_NO_DISPLAY;
    EGLSyncKHR eglFence = EGL_NO_SYNC_KHR;
    bool attachedByConsumer = false;

    { // Autolock scope
        Mutex::Autolock lock(mCore->mMutex);
        mCore->waitWhileAllocatingLocked();

        if (format == 0) {
            format = mCore->mDefaultBufferFormat;
        }

        // Enable the usage bits the consumer requested
        usage |= mCore->mConsumerUsageBits;

        const bool useDefaultSize = !width && !height;
        if (useDefaultSize) {
            width = mCore->mDefaultWidth;
            height = mCore->mDefaultHeight;
        }

        int found = BufferItem::INVALID_BUFFER_SLOT;
        while (found == BufferItem::INVALID_BUFFER_SLOT) {
            status_t status = waitForFreeSlotThenRelock("dequeueBuffer", async,
                    &found, &returnFlags);
            if (status != NO_ERROR) {
                return status;
            }

            // This should not happen
            if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
                BQ_LOGE("dequeueBuffer: no available buffer slots");
                return -EBUSY;
            }

            const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);

            // If we are not allowed to allocate new buffers,
            // waitForFreeSlotThenRelock must have returned a slot containing a
            // buffer. If this buffer would need reallocation to meet the
            // requested attributes, we free it and attempt to get another one.
            if (!mCore->mAllowAllocation) {
                if (buffer->needsReallocation(width, height, format, usage)) {
                    mCore->freeBufferLocked(found);
                    found = BufferItem::INVALID_BUFFER_SLOT;
                    continue;
                }
            }
        }

        *outSlot = found;
        ATRACE_BUFFER_INDEX(found);

        attachedByConsumer = mSlots[found].mAttachedByConsumer;

        mSlots[found].mBufferState = BufferSlot::DEQUEUED;

        const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);
        if ((buffer == NULL) ||
                buffer->needsReallocation(width, height, format, usage))
        {
            // Reset the slot; the caller must requestBuffer() a new one.
            mSlots[found].mAcquireCalled = false;
            mSlots[found].mGraphicBuffer = NULL;
            mSlots[found].mRequestBufferCalled = false;
            mSlots[found].mEglDisplay = EGL_NO_DISPLAY;
            mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
            mSlots[found].mFence = Fence::NO_FENCE;
            mCore->mBufferAge = 0;

            returnFlags |= BUFFER_NEEDS_REALLOCATION;
        } else {
            // We add 1 because that will be the frame number when this buffer
            // is queued
            mCore->mBufferAge =
                    mCore->mFrameCounter + 1 - mSlots[found].mFrameNumber;
        }

        BQ_LOGV("dequeueBuffer: setting buffer age to %" PRIu64,
                mCore->mBufferAge);

        if (CC_UNLIKELY(mSlots[found].mFence == NULL)) {
            BQ_LOGE("dequeueBuffer: about to return a NULL fence - "
                    "slot=%d w=%d h=%d format=%u",
                    found, buffer->width, buffer->height, buffer->format);
        }

        eglDisplay = mSlots[found].mEglDisplay;
        eglFence = mSlots[found].mEglFence;
        *outFence = mSlots[found].mFence;
        mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
        mSlots[found].mFence = Fence::NO_FENCE;

        mCore->validateConsistencyLocked();
    } // Autolock scope

    if (returnFlags & BUFFER_NEEDS_REALLOCATION) {
        status_t error;
        BQ_LOGV("dequeueBuffer: allocating a new buffer for slot %d", *outSlot);
        // Allocation happens outside the lock; waitWhileAllocatingLocked()
        // keeps other callers out while it is in progress.
        sp<GraphicBuffer> graphicBuffer(mCore->mAllocator->createGraphicBuffer(
                width, height, format, usage, &error));
        if (graphicBuffer == NULL) {
            BQ_LOGE("dequeueBuffer: createGraphicBuffer failed");
            return error;
        }

        { // Autolock scope
            Mutex::Autolock lock(mCore->mMutex);

            if (mCore->mIsAbandoned) {
                BQ_LOGE("dequeueBuffer: BufferQueue has been abandoned");
                return NO_INIT;
            }

            graphicBuffer->setGenerationNumber(mCore->mGenerationNumber);
            mSlots[*outSlot].mGraphicBuffer = graphicBuffer;
        } // Autolock scope
    }

    if (attachedByConsumer) {
        returnFlags |= BUFFER_NEEDS_REALLOCATION;
    }

    if (eglFence != EGL_NO_SYNC_KHR) {
        EGLint result = eglClientWaitSyncKHR(eglDisplay, eglFence, 0,
                1000000000);
        // If something goes wrong, log the error, but return the buffer
        // without synchronizing access to it. It's too late at this point to
        // abort the dequeue operation.
        if (result == EGL_FALSE) {
            BQ_LOGE("dequeueBuffer: error %#x waiting for fence", eglGetError());
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            BQ_LOGE("dequeueBuffer: timeout waiting for fence");
        }
        eglDestroySyncKHR(eglDisplay, eglFence);
    }

    BQ_LOGV("dequeueBuffer: returning slot=%d/%" PRIu64 " buf=%p flags=%#x",
            *outSlot,
            mSlots[*outSlot].mFrameNumber,
            mSlots[*outSlot].mGraphicBuffer->handle, returnFlags);

    return returnFlags;
}
Copy the code

Finally, we analyze submitting a batch of requests to the HAL. This essentially comes down to the HAL's process_capture_request(…) implementation. mHal3Device points to the camera3_device_t data type, which is actually a camera3_device structure.

Common.version must be equal to CAMERA_DEVICE_API_VERSION_3_0 to identify this device as version 3.0 implementing camera device HAL.

Performance requirements:

Camera open (common.module-> common.methods-> open) should return within 200 milliseconds, and must return within 500 milliseconds. Camera close (common.close) should return within 200 milliseconds, and must return within 500 milliseconds.

hardware/libhardware/include/hardware/camera3.h

Camera device definition (from camera3.h):
typedef struct camera3_device {
    // Standard hardware device header; common.version must equal
    // CAMERA_DEVICE_API_VERSION_3_0 to identify this as a HAL v3.0 camera device.
    hw_device_t common;
    // Table of device operations, including process_capture_request.
    camera3_device_ops_t *ops;
    // HAL-private data; the HAL casts this back to its implementation object
    // (e.g. QCamera3HardwareInterface* in the Qualcomm HAL below).
    void *priv;
} camera3_device_t;
Copy the code

Consider the camera3_device_ops_t structure, where the process_Capture_REQUEST function pointer is defined.

Process_capture_request function pointer

Send a new capture request to HAL. HAL should not return from this call until it is ready for the next processing request. The framework calls process_capture_request() only once at a time, and all calls come from the same thread. The next call to process_capture_REQUEST () is made as soon as a new request and its associated buffer are available. In a normal preview scenario, this means that the framework will call the function again almost immediately.

The actual request processing is asynchronous, and the capture result is returned by HAL through the process_capture_result() call. This call requires that the result metadata be available, but the output buffer can simply provide a synchronization fence to wait. Multiple requests are expected to be made simultaneously to maintain a complete output frame rate.

The framework retains ownership of the request structure. It is only guaranteed to be valid during this call. HAL devices must copy the information they retain for their capture processing. HAL is responsible for waiting and closing the buffer’s fence and returning the buffer’s handle to the frame.

hardware/libhardware/include/hardware/camera3.h

typedef struct camera3_device_ops {.int (*process_capture_request)(const struct camera3_device *,
            camera3_capture_request_t*request); . }camera3_device_ops_t;
Copy the code

Taking the Moto Nexus 6 HAL as an example, the process_capture_request function pointer points to the QCamera3HardwareInterface::process_capture_request method in QCamera3HWI.cpp.

It first casts the priv private data of the camera3_device structure back to a QCamera3HardwareInterface* pointer, then calls its processCaptureRequest(…) method.

device/moto/shamu/camera/QCamera2/HAL3/QCamera3HWI.cpp

// Static HAL entry point: recovers the QCamera3HardwareInterface instance
// from device->priv and forwards the capture request to it.
// Returns -EINVAL if priv is NULL, otherwise the result of
// processCaptureRequest().
int QCamera3HardwareInterface::process_capture_request(
                    const struct camera3_device *device,
                    camera3_capture_request_t *request)
{
    CDBG("%s: E", __func__);
    QCamera3HardwareInterface *hw =
        reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
    if (!hw) {
        ALOGE("%s: NULL camera device", __func__);
        return -EINVAL;
    }

    int rc = hw->processCaptureRequest(request);
    CDBG("%s: X", __func__);
    return rc;
}
Copy the code

Process capture requests from the camera service.

  1. The first call initializes all streams;
  2. Start all streams;
  3. Update the list of pending requests and the pending buffer map, and then invoke the request on the other flow.

device/moto/shamu/camera/QCamera2/HAL3/QCamera3HWI.cpp

// Process a capture request from the camera service.
//  1. On the first request: configure EIS, register buffers, initialize and
//     start all streams/channels.
//  2. Wait on acquire fences, update the pending-request list and
//     pending-buffer map.
//  3. Issue the request on each output stream's channel and set frame
//     parameters to the backend.
//  4. Throttle the caller: block (with a 5 s timeout) while too many
//     requests are in flight.
// All state is protected by mMutex, which is held for the duration of the
// call and released on every return path.
int QCamera3HardwareInterface::processCaptureRequest(
                    camera3_capture_request_t *request)
{
    ATRACE_CALL();
    int rc = NO_ERROR;
    int32_t request_id;
    CameraMetadata meta;

    pthread_mutex_lock(&mMutex);

    // Validate the incoming request
    rc = validateCaptureRequest(request);
    if (rc != NO_ERROR) {
        ALOGE("%s: incoming request is not valid", __func__);
        pthread_mutex_unlock(&mMutex);
        return rc;
    }

    meta = request->settings;

    // For the first capture request: send the capture intent, then stream on
    // all streams
    if (mFirstRequest) {

        /* get EIS information for stream configuration */
        cam_is_type_t is_type;
        char is_type_value[PROPERTY_VALUE_MAX];
        property_get("camera.is_type", is_type_value, "0");
        is_type = static_cast<cam_is_type_t>(atoi(is_type_value));

        if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
            int32_t hal_version = CAM_HAL_V3;
            uint8_t captureIntent =
                meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
            mCaptureIntent = captureIntent;
            memset(mParameters, 0, sizeof(parm_buffer_t));
            AddSetParmEntryToBatch(mParameters, CAM_INTF_PARM_HAL_VERSION,
                sizeof(hal_version), &hal_version);
            AddSetParmEntryToBatch(mParameters, CAM_INTF_META_CAPTURE_INTENT,
                sizeof(captureIntent), &captureIntent);
        }

        // If EIS is enabled, turn it on for video recording.
        // There is no EIS for the front camera or for 4K video.
        bool setEis = mEisEnable &&
            (gCamCapability[mCameraId]->position == CAM_POSITION_BACK &&
            (mCaptureIntent == CAMERA3_TEMPLATE_VIDEO_RECORD ||
             mCaptureIntent == CAMERA3_TEMPLATE_VIDEO_SNAPSHOT));
        int32_t vsMode;
        vsMode = (setEis) ? DIS_ENABLE : DIS_DISABLE;
        rc = AddSetParmEntryToBatch(mParameters,
                CAM_INTF_PARM_DIS_ENABLE,
                sizeof(vsMode), &vsMode);

        // IS type is 0 unless EIS is supported. If EIS is supported, it can
        // be 1 or 4 depending on the stream and video size.
        if (setEis) {
            if (m_bIs4KVideo) {
                is_type = IS_TYPE_DIS;
            } else {
                is_type = IS_TYPE_EIS_2_0;
            }
        }

        for (size_t i = 0; i < request->num_output_buffers; i++) {
            const camera3_stream_buffer_t& output = request->output_buffers[i];
            QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
            /* for the livesnapshot stream, is_type will be DIS */
            if (setEis && output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
                rc = channel->registerBuffer(output.buffer, IS_TYPE_DIS);
            } else {
                rc = channel->registerBuffer(output.buffer, is_type);
            }
            if (rc < 0) {
                ALOGE("%s: registerBuffer failed",
                        __func__);
                pthread_mutex_unlock(&mMutex);
                return -ENODEV;
            }
        }

        /* Set capture intent, HAL version and DIS-enable parms to backend */
        mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
                    mParameters);

        // First initialize all streams
        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
                it != mStreamInfo.end(); it++) {
            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
            if (setEis && (*it)->stream->format == HAL_PIXEL_FORMAT_BLOB) {
                rc = channel->initialize(IS_TYPE_DIS);
            } else {
                rc = channel->initialize(is_type);
            }
            if (NO_ERROR != rc) {
                ALOGE("%s : Channel initialization failed %d", __func__, rc);
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }

        if (mRawDumpChannel) {
            rc = mRawDumpChannel->initialize(is_type);
            if (rc != NO_ERROR) {
                ALOGE("%s: Error: Raw Dump Channel init failed", __func__);
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }
        if (mSupportChannel) {
            rc = mSupportChannel->initialize(is_type);
            if (rc < 0) {
                ALOGE("%s: Support channel initialization failed", __func__);
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }

        // Then start them
        CDBG_HIGH("%s: Start META Channel", __func__);
        rc = mMetadataChannel->start();
        if (rc < 0) {
            ALOGE("%s: META channel start failed", __func__);
            pthread_mutex_unlock(&mMutex);
            return rc;
        }

        if (mSupportChannel) {
            rc = mSupportChannel->start();
            if (rc < 0) {
                ALOGE("%s: Support channel start failed", __func__);
                mMetadataChannel->stop();
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }

        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
                it != mStreamInfo.end(); it++) {
            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
            CDBG_HIGH("%s: Start Regular Channel mask=%d", __func__,
                    channel->getStreamTypeMask());
            rc = channel->start();
            if (rc < 0) {
                ALOGE("%s: channel start failed", __func__);
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }

        if (mRawDumpChannel) {
            CDBG("%s: Starting raw dump stream", __func__);
            rc = mRawDumpChannel->start();
            if (rc != NO_ERROR) {
                ALOGE("%s: Error Starting Raw Dump Channel", __func__);
                // Roll back: stop everything that was started above.
                for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
                        it != mStreamInfo.end(); it++) {
                    QCamera3Channel *channel =
                        (QCamera3Channel *)(*it)->stream->priv;
                    ALOGE("%s: Stopping Regular Channel mask=%d", __func__,
                            channel->getStreamTypeMask());
                    channel->stop();
                }
                if (mSupportChannel)
                    mSupportChannel->stop();
                mMetadataChannel->stop();
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }
        mWokenUpByDaemon = false;
        mPendingRequest = 0;
    }

    uint32_t frameNumber = request->frame_number;
    cam_stream_ID_t streamID;

    if (meta.exists(ANDROID_REQUEST_ID)) {
        request_id = meta.find(ANDROID_REQUEST_ID).data.i32[0];
        mCurrentRequestId = request_id;
        CDBG("%s: Received request with id: %d", __func__, request_id);
    } else if (mFirstRequest || mCurrentRequestId == -1) {
        ALOGE("%s: Unable to find request id field, \
                & no previous id available", __func__);
        // BUGFIX: this error path previously returned while still holding
        // mMutex, deadlocking the next call; unlock before returning like
        // every other error path in this function.
        pthread_mutex_unlock(&mMutex);
        return NAME_NOT_FOUND;
    } else {
        CDBG("%s: Re-using old request id", __func__);
        request_id = mCurrentRequestId;
    }

    CDBG("%s: %d, num_output_buffers = %d input_buffer = %p frame_number = %d",
                                    __func__, __LINE__,
                                    request->num_output_buffers,
                                    request->input_buffer,
                                    frameNumber);
    // Acquire all request buffers first
    streamID.num_streams = 0;
    int blob_request = 0;
    uint32_t snapshotStreamId = 0;
    for (size_t i = 0; i < request->num_output_buffers; i++) {
        const camera3_stream_buffer_t& output = request->output_buffers[i];
        QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;

        if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
            // Remember the snapshot stream so JPEG encode params can target it.
            blob_request = 1;
            snapshotStreamId = channel->getStreamID(channel->getStreamTypeMask());
        }

        // Wait on (and close) the acquire fence before touching the buffer.
        if (output.acquire_fence != -1) {
            rc = sync_wait(output.acquire_fence, TIMEOUT_NEVER);
            close(output.acquire_fence);
            if (rc != OK) {
                ALOGE("%s: sync wait failed %d", __func__, rc);
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }

        streamID.streamID[streamID.num_streams] =
            channel->getStreamID(channel->getStreamTypeMask());
        streamID.num_streams++;
    }

    if (blob_request && mRawDumpChannel) {
        CDBG("%s: Trigger Raw based on blob request if Raw dump is enabled",
                __func__);
        streamID.streamID[streamID.num_streams] =
            mRawDumpChannel->getStreamID(mRawDumpChannel->getStreamTypeMask());
        streamID.num_streams++;
    }

    if (request->input_buffer == NULL) {
        rc = setFrameParameters(request, streamID, snapshotStreamId);
        if (rc < 0) {
            ALOGE("%s: fail to set frame parameters", __func__);
            pthread_mutex_unlock(&mMutex);
            return rc;
        }
    } else {
        // Reprocess request: wait on the input buffer's acquire fence too.
        if (request->input_buffer->acquire_fence != -1) {
           rc = sync_wait(request->input_buffer->acquire_fence, TIMEOUT_NEVER);
           close(request->input_buffer->acquire_fence);
           if (rc != OK) {
              ALOGE("%s: input buffer sync wait failed %d", __func__, rc);
              pthread_mutex_unlock(&mMutex);
              return rc;
           }
        }
    }

    /* Update the pending request list and the pending buffers map */
    PendingRequestInfo pendingRequest;
    pendingRequest.frame_number = frameNumber;
    pendingRequest.num_buffers = request->num_output_buffers;
    pendingRequest.request_id = request_id;
    pendingRequest.blob_request = blob_request;
    pendingRequest.bUrgentReceived = 0;

    pendingRequest.input_buffer = request->input_buffer;
    pendingRequest.settings = request->settings;
    pendingRequest.pipeline_depth = 0;
    pendingRequest.partial_result_cnt = 0;
    extractJpegMetadata(pendingRequest.jpegMetadata, request);

    // Extract the capture intent
    if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
        mCaptureIntent =
                meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
    }
    pendingRequest.capture_intent = mCaptureIntent;

    for (size_t i = 0; i < request->num_output_buffers; i++) {
        RequestedBufferInfo requestedBuf;
        requestedBuf.stream = request->output_buffers[i].stream;
        requestedBuf.buffer = NULL;
        pendingRequest.buffers.push_back(requestedBuf);

        // Add the buffer handle to the pending buffers list
        PendingBufferInfo bufferInfo;
        bufferInfo.frame_number = frameNumber;
        bufferInfo.buffer = request->output_buffers[i].buffer;
        bufferInfo.stream = request->output_buffers[i].stream;
        mPendingBuffersMap.mPendingBufferList.push_back(bufferInfo);
        mPendingBuffersMap.num_buffers++;
        CDBG("%s: frame = %d, buffer = %p, stream = %p, stream format = %d",
          __func__, frameNumber, bufferInfo.buffer, bufferInfo.stream,
          bufferInfo.stream->format);
    }
    CDBG("%s: mPendingBuffersMap.num_buffers = %d",
          __func__, mPendingBuffersMap.num_buffers);

    mPendingRequestsList.push_back(pendingRequest);

    if (mFlush) {
        pthread_mutex_unlock(&mMutex);
        return NO_ERROR;
    }

    // Notify the metadata channel that we received a request
    mMetadataChannel->request(NULL, frameNumber);

    metadata_buffer_t reproc_meta;
    memset(&reproc_meta, 0, sizeof(metadata_buffer_t));

    if (request->input_buffer != NULL) {
        rc = setReprocParameters(request, &reproc_meta, snapshotStreamId);
        if (NO_ERROR != rc) {
            ALOGE("%s: fail to set reproc parameters", __func__);
            pthread_mutex_unlock(&mMutex);
            return rc;
        }
    }

    // Issue the request on the other streams
    for (size_t i = 0; i < request->num_output_buffers; i++) {
        const camera3_stream_buffer_t& output = request->output_buffers[i];
        QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;

        if (channel == NULL) {
            ALOGE("%s: invalid channel pointer for stream", __func__);
            continue;
        }

        if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
            rc = channel->request(output.buffer, frameNumber,
                    request->input_buffer,
                    (request->input_buffer) ? &reproc_meta : mParameters);
            if (rc < 0) {
                ALOGE("%s: Fail to request on picture channel", __func__);
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        } else {
            CDBG("%s: %d, request with buffer %p, frame_number %d", __func__,
                __LINE__, output.buffer, frameNumber);
            rc = channel->request(output.buffer, frameNumber);
        }
        if (rc < 0)
            ALOGE("%s: request failed", __func__);
    }

    if (request->input_buffer == NULL) {
        /* Set the parameters to the backend */
        mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters);
    }

    mFirstRequest = false;
    // Added a timed condition wait
    struct timespec ts;
    uint8_t isValidTimeout = 1;
    rc = clock_gettime(CLOCK_REALTIME, &ts);
    if (rc < 0) {
      isValidTimeout = 0;
      ALOGE("%s: Error reading the real time clock!!", __func__);
    }
    else {
      // Make the timeout 5 seconds
      ts.tv_sec += 5;
    }
    // Block on the condition variable while too many requests are in flight.

    mPendingRequest++;
    while (mPendingRequest >= MIN_INFLIGHT_REQUESTS) {
        if (!isValidTimeout) {
            CDBG("%s: Blocking on conditional wait", __func__);
            pthread_cond_wait(&mRequestCond, &mMutex);
        }
        else {
            CDBG("%s: Blocking on timed conditional wait", __func__);
            rc = pthread_cond_timedwait(&mRequestCond, &mMutex, &ts);
            if (rc == ETIMEDOUT) {
                rc = -ENODEV;
                ALOGE("%s: Unblocked on timeout!!!!", __func__);
                break;
            }
        }
        CDBG("%s: Unblocked", __func__);
        if (mWokenUpByDaemon) {
            mWokenUpByDaemon = false;
            if (mPendingRequest < MAX_INFLIGHT_REQUESTS)
                break;
        }
    }
    pthread_mutex_unlock(&mMutex);

    return rc;
}
Copy the code