Saving this here as an archive rather than keeping it locally; nothing below modifies engine.cpp, it only walks through it.

1. Output selection

The whole process

  1. First, get the collection of currently available output devices (availableOutputDevices).

  2. A match is then made based on the strategy type passed in.

  3. Before selecting, check for special circumstances (for example, an ongoing phone call).

  4. Finally, a device is picked according to the strategy's priority order (a rough sketch follows this list).
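A rough, self-contained sketch of steps 3 and 4 (illustrative only, not AOSP code; every name below is made up for the sketch). Special cases such as an ongoing call are handled first, then a priority-ordered list of candidate device bits is scanned and the first available one wins:

#include <cstddef>
#include <cstdint>

typedef uint32_t audio_devices_t;                 // device types are plain bit masks, as in AOSP
static const audio_devices_t AUDIO_DEVICE_NONE = 0;

// Walk a per-strategy, priority-ordered list of candidate device bits and return
// the first one that is present in the set of currently available devices.
static audio_devices_t pickFirstAvailable(audio_devices_t availableTypes,
                                          const audio_devices_t *priorityList, size_t n) {
    for (size_t i = 0; i < n; i++) {
        audio_devices_t hit = availableTypes & priorityList[i];
        if (hit != AUDIO_DEVICE_NONE) {
            return hit;                           // first match in priority order wins
        }
    }
    return AUDIO_DEVICE_NONE;                     // nothing suitable is connected
}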

The strategy for audio device selection is driven by the getDeviceForStrategy() method of AudioPolicyManager, which selects the most appropriate device based on the current state and the connected devices. The concrete implementation lives in frameworks/av/services/audiopolicy/enginedefault/src/Engine.cpp.

If fromCache is true, the device is read from mDeviceForStrategy[]; otherwise the engine is consulted.

mDeviceForStrategy[NUM_STRATEGIES] is an array that caches the device selected for each strategy.
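The cache itself is refreshed by AudioPolicyManager::updateDevicesAndOutputs() (one of the callers listed further down), which re-runs the selection for every strategy with fromCache = false. In AOSP 7.x it is essentially:

void AudioPolicyManager::updateDevicesAndOutputs()
{
    for (int i = 0; i < NUM_STRATEGIES; i++) {
        mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
    }
    mPreviousOutputs = mOutputs;
}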

frameworks/av/services/audiopolicy/common/include/RoutingStrategy.h
 enum routing_strategy {
    STRATEGY_MEDIA,
    STRATEGY_PHONE,
    STRATEGY_SONIFICATION,
    STRATEGY_SONIFICATION_RESPECTFUL,
    STRATEGY_DTMF,
    STRATEGY_ENFORCED_AUDIBLE,
    STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
    STRATEGY_ACCESSIBILITY,
    STRATEGY_REROUTING,
    NUM_STRATEGIES
};
 

frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.h

virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy,
                                             bool fromCache);

1.1 Implementation in AudioPolicyManager.cpp

frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp

All of the following code paths end up calling getDeviceForStrategy():
  • void AudioPolicyManager::setPhoneState(audio_mode_t state)

  • audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)

  • status_t AudioPolicyManager::getOutputForAttr

  • status_t AudioPolicyManager::startOutput(

  • status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream, int *index, audio_devices_t device)

  • audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc)

  • void AudioPolicyManager::checkStrategyRoute(routing_strategy strategy, audio_io_handle_t ouptutToSkip)

  • status_t AudioPolicyManager::connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)

  • void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)

  • audio_devices_t AudioPolicyManager::getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc, bool fromCache)

  • audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {

  • void AudioPolicyManager::updateDevicesAndOutputs()

  • uint32_t AudioPolicyManager::checkDeviceMuteStrategies(sp<AudioOutputDescriptor> outputDesc, …)

  • float AudioPolicyManager::computeVolume(audio_stream_type_t stream,

  • void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange)

  • status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,

audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
                                                         bool fromCache)
{
    // Routing
    // see if we have an explicit route
    // scan the whole RouteMap, for each entry, convert the stream type to a strategy
    // (getStrategy(stream)).
    // if the strategy from the stream type in the RouteMap is the same as the argument above,
    // and activity count is non-zero
    // the device = the device from the descriptor in the RouteMap, and exit.
    for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) {
        sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex);
        routing_strategy routeStrategy = getStrategy(route->mStreamType); // map the stream type to a strategy
        if ((routeStrategy == strategy) && route->isActive()) {
            return route->mDeviceDescriptor->type();
        }
    }

    if (fromCache) {
        ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x",
              strategy, mDeviceForStrategy[strategy]);
        return mDeviceForStrategy[strategy];
    }
    return mEngine->getDeviceForStrategy(strategy);
}
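The mOutputRoutes checked first hold explicit per-session routing requests (for example from the Java AudioTrack/MediaPlayer setPreferredDevice() APIs, or AudioTrack::setOutputDevice() natively), which is why they win over both the cache and the engine. A minimal native sketch (the helper routeTrackTo is ours), assuming deviceId is a valid audio_port_handle_t of an attached output device:

#include <media/AudioTrack.h>

// Ask the policy to route this track to an explicit output device; the request is
// recorded as a SessionRoute in mOutputRoutes and is honoured first by getDeviceForStrategy().
void routeTrackTo(const android::sp<android::AudioTrack>& track, audio_port_handle_t deviceId) {
    track->setOutputDevice(deviceId);
}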
The non-cached path is implemented in Engine.cpp:

frameworks/av/services/audiopolicy/enginedefault/src/Engine.cpp

routing_strategy Engine::getStrategyForStream(audio_stream_type_t stream)
{
    // stream to strategy mapping
    switch (stream) {
    case AUDIO_STREAM_VOICE_CALL:
    case AUDIO_STREAM_BLUETOOTH_SCO:
        return STRATEGY_PHONE;
    case AUDIO_STREAM_RING:
    case AUDIO_STREAM_ALARM:
        return STRATEGY_SONIFICATION;
    case AUDIO_STREAM_NOTIFICATION:
        return STRATEGY_SONIFICATION_RESPECTFUL;
    case AUDIO_STREAM_DTMF:
        return STRATEGY_DTMF;
    default:
        ALOGE("unknown stream type %d", stream);
    case AUDIO_STREAM_SYSTEM:
        // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
        // while key clicks are played produces a poor result
    case AUDIO_STREAM_MUSIC:
        return STRATEGY_MEDIA;
    case AUDIO_STREAM_ENFORCED_AUDIBLE:
        return STRATEGY_ENFORCED_AUDIBLE;
    case AUDIO_STREAM_TTS:
        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
    case AUDIO_STREAM_ACCESSIBILITY:
        return STRATEGY_ACCESSIBILITY;
    case AUDIO_STREAM_REROUTING:
        return STRATEGY_REROUTING;
    }
}
audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
{
    DeviceVector availableOutputDevices = mApmObserver->getAvailableOutputDevices();
    DeviceVector availableInputDevices = mApmObserver->getAvailableInputDevices();
    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();

    return getDeviceForStrategyInt(strategy, availableOutputDevices,
                                   availableInputDevices, outputs);
}

1.2 STRATEGY_MEDIA: the playback device priority is as follows

  • AUDIO_DEVICE_OUT_BLUETOOTH_A2DP
  • AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES
  • AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER
  • AUDIO_DEVICE_OUT_SPEAKER (only when setForceUse() has forced FORCE_SPEAKER)
  • AUDIO_DEVICE_OUT_WIRED_HEADPHONE (wired headphones without microphone)
  • AUDIO_DEVICE_OUT_LINE
  • AUDIO_DEVICE_OUT_WIRED_HEADSET (wired headset with microphone)
  • AUDIO_DEVICE_OUT_USB_ACCESSORY / AUDIO_DEVICE_OUT_USB_DEVICE (USB devices)
case STRATEGY_MEDIA: {
        uint32_t device2 = AUDIO_DEVICE_NONE;

        if (isInCall() && (device == AUDIO_DEVICE_NONE)) {  // during a call, follow the phone strategy
            // when in call, get the device for Phone strategy
            device = getDeviceForStrategy(STRATEGY_PHONE);
            break;
        }

        if (strategy != STRATEGY_SONIFICATION) {  // sonification gets special handling
            // no sonification on remote submix (e.g. WFD)
            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                                                 String8("0")) != 0) {
                device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
            }
        }
        if (isInCall() && (strategy == STRATEGY_MEDIA)) {
            device = getDeviceForStrategyInt(
                    STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
            break;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
            (outputs.isA2dpOnPrimary() || (outputs.getA2dpOutput() != 0))) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;                // A2DP
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES; // Bluetooth headphones
            }
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;    // Bluetooth speaker
            }
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) { // forced to the speaker
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE; // wired headphones without microphone
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET; // wired headset with microphone
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;  // USB device
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
        }
        if ((strategy != STRATEGY_SONIFICATION) &&
            (device == AUDIO_DEVICE_NONE) && (device2 == AUDIO_DEVICE_NONE)) {
            // no sonification on aux digital (e.g. HDMI)
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK) &&
            (strategy != STRATEGY_SONIFICATION)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
        }
#ifdef AUDIO_EXTN_AFE_PROXY_ENABLED
        if ((strategy != STRATEGY_SONIFICATION) &&
            (device == AUDIO_DEVICE_NONE) && (device2 == AUDIO_DEVICE_NONE)) {
            // no sonification on WFD sink
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_PROXY;
        }
#endif
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER; // finally, the speaker
        }
        int device3 = AUDIO_DEVICE_NONE;
        if (strategy == STRATEGY_MEDIA) {
            // ARC, SPDIF and AUX_LINE can co-exist with others and are OR-ed into device3.
            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
        }

        device2 |= device3;
        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
        device |= device2;

        // If hdmi system audio mode is on, remove speaker out of output list.
        if ((strategy == STRATEGY_MEDIA) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
                 AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
        }
        } break;
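The mForceUse[] entries consulted above are set through AudioSystem::setForceUse() (reached from the Java side via AudioManager/AudioService). A minimal native sketch of flipping the AUDIO_POLICY_FORCE_SPEAKER branch in the cascade above; the helper name forceMediaToSpeaker is ours:

#include <media/AudioSystem.h>
#include <system/audio_policy.h>

// Force media playback to the speaker even if A2DP or wired devices are connected,
// then restore normal policy-driven selection when called with enable = false.
void forceMediaToSpeaker(bool enable) {
    android::AudioSystem::setForceUse(
            AUDIO_POLICY_FORCE_FOR_MEDIA,
            enable ? AUDIO_POLICY_FORCE_SPEAKER : AUDIO_POLICY_FORCE_NONE);
}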

2. Input selection

This part covers the audio device selection strategy for input (capture) on Android 7.0.

1. Select an input device

1.1 setAudioSource in the MediaRecorder Java layer

Take MediaRecorder as an example. setAudioSource() sets the input source:

   mMediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);// Set the microphone

frameworks/base/media/java/android/media/MediaRecorder.java

The following audio source types are defined:

/** Default audio source */
public static final int DEFAULT = 0;
/** Microphone audio source */
public static final int MIC = 1;
/** Voice call uplink (Tx) audio source */
public static final int VOICE_UPLINK = 2;
/** Voice call downlink (Rx) audio source */
public static final int VOICE_DOWNLINK = 3;
/** Voice call uplink + downlink audio source */
public static final int VOICE_CALL = 4;
/** Microphone audio source with same orientation as camera if available,
 *  the main device microphone otherwise */
public static final int CAMCORDER = 5;
public static final int VOICE_RECOGNITION = 6;   // speech recognition
public static final int VOICE_COMMUNICATION = 7; // VoIP
public static final int REMOTE_SUBMIX = 8;       // internal (in-device) recording
public static final int UNPROCESSED = 9;         // raw, unprocessed capture
@SystemApi
public static final int RADIO_TUNER = 1998;      // radio
@SystemApi
public static final int HOTWORD = 1999;          // always-on hotword detection
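These Java constants travel down as plain ints and are eventually cast to the native audio_source_t (see MediaRecorderClient below), so their values line up with the enum in system/audio.h. A condensed view of that native enum, with values as found in Android 7.x headers (verify against your tree):

typedef enum {
    AUDIO_SOURCE_DEFAULT             = 0,
    AUDIO_SOURCE_MIC                 = 1,
    AUDIO_SOURCE_VOICE_UPLINK        = 2,
    AUDIO_SOURCE_VOICE_DOWNLINK      = 3,
    AUDIO_SOURCE_VOICE_CALL          = 4,
    AUDIO_SOURCE_CAMCORDER           = 5,
    AUDIO_SOURCE_VOICE_RECOGNITION   = 6,
    AUDIO_SOURCE_VOICE_COMMUNICATION = 7,
    AUDIO_SOURCE_REMOTE_SUBMIX       = 8,
    AUDIO_SOURCE_UNPROCESSED         = 9,
    AUDIO_SOURCE_FM_TUNER            = 1998,
    AUDIO_SOURCE_HOTWORD             = 1999,
} audio_source_t;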

Continuing in MediaRecorder:

public native void setAudioSource(int audio_source)
        throws IllegalStateException;

The call goes through JNI into the native MediaRecorder:

frameworks/base/media/jni/android_media_MediaRecorder.cpp

static void
android_media_MediaRecorder_setAudioSource(JNIEnv *env, jobject thiz, jint as)
{
    sp<MediaRecorder> mr = getMediaRecorder(env, thiz);
    process_media_recorder_call(env, mr->setAudioSource(as),
                                "java/lang/RuntimeException", "setAudioSource failed.");
}

frameworks/av/media/libmedia/mediarecorder.cpp

status_t MediaRecorder::setAudioSource(int as){
 status_t ret = mMediaRecorder->setAudioSource(as);
}

1.2 Who is mMediaRecorder?

mMediaRecorder is the IMediaRecorder obtained from IMediaPlayerService by calling createMediaRecorder():

MediaRecorder::MediaRecorder(const String16& opPackageName) : mSurfaceMediaSource(NULL)
{
    const sp<IMediaPlayerService> service(getMediaPlayerService());
    if (service != NULL) {
        mMediaRecorder = service->createMediaRecorder(opPackageName);
    }
}

frameworks/av/media/libmedia/IMediaPlayerService.cpp

What follows is a plain Binder call, with BnMediaPlayerService on the server side:

virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName)
{
    Parcel data, reply;
    data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
    data.writeString16(opPackageName);
    remote()->transact(CREATE_MEDIA_RECORDER, data, &reply);
    return interface_cast<IMediaRecorder>(reply.readStrongBinder());
}
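On the receiving side, BnMediaPlayerService::onTransact() (in the same file) unpacks the parcel and invokes the real createMediaRecorder(); abbreviated, it looks roughly like this:

status_t BnMediaPlayerService::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
        case CREATE_MEDIA_RECORDER: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            const String16 opPackageName = data.readString16();
            sp<IMediaRecorder> recorder = createMediaRecorder(opPackageName);
            reply->writeStrongBinder(IInterface::asBinder(recorder));
            return NO_ERROR;
        } break;
        // ... other codes ...
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}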

MediaPlayerService inherits from BnMediaPlayerService:

frameworks/av/media/libmediaplayerservice/MediaPlayerService.h

class MediaPlayerService : public BnMediaPlayerService

The Binder call thus lands in MediaPlayerService, whose createMediaRecorder() returns a MediaRecorderClient; that is the recorder object:

frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp

sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(const String16 &opPackageName){
    sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid, opPackageName);
    return recorder;
}

1.3 setAudioSource in MediaRecorderClient.cpp

From here, setAudioSource() continues:

frameworks/av/media/libmediaplayerservice/MediaRecorderClient.cpp

status_t MediaRecorderClient::setAudioSource(int as)
{
    return mRecorder->setAudioSource((audio_source_t)as);
}
MediaRecorderBase      *mRecorder;

mRecorder is a MediaRecorderBase pointer, and StagefrightRecorder inherits from MediaRecorderBase, so we continue into StagefrightRecorder:

frameworks/av/media/libmediaplayerservice/StagefrightRecorder.cpp

struct StagefrightRecorder : public MediaRecorderBase {

status_t StagefrightRecorder::setAudioSource(audio_source_t as) {

    if (as == AUDIO_SOURCE_DEFAULT) {
        mAudioSource = AUDIO_SOURCE_MIC;
    } else {
        mAudioSource = as;
    }
}

StagefrightRecorder then creates an AudioSource from mAudioSource:

sp<MediaCodecSource> StagefrightRecorder::createAudioSource() {

    sp<AudioSource> audioSource = AVFactory::get()->createAudioSource(...);
}

frameworks/av/media/libavextensions/stagefright/AVFactory.cpp

AudioSource* AVFactory::createAudioSource(...) {
    return new AudioSource(inputSource, opPackageName, sampleRate,
                           channels, outSampleRate, clientUid, clientPid);
}

AudioSource in turn creates an AudioRecord:

frameworks/av/media/libstagefright/AudioSource.cpp

mRecord = new AudioRecord(inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT, 

The following code is in the constructor of the AudioRecord, where inputSource is the argument we passed above.

frameworks/av/media/libmedia/AudioRecord.cpp

mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
              notificationFrames, false /*threadCanCallJava*/, sessionId, transferType,
              flags, uid, pid, pAttributes);

// inside set():
if (pAttributes == NULL) {
    mAttributes.source = inputSource;
}

mAttributes is then used in the openRecord_l() method, where AudioSystem::getInputForAttr() is called:

status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
{
    status = AudioSystem::getInputForAttr(&mAttributes, &input, mSessionId, mClientPid,
                                          mClientUid, mSampleRate, mFormat, mChannelMask,
                                          mFlags, mSelectedDeviceId);
}

frameworks/av/media/libmedia/AudioSystem.cpp

status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr, ...)
{
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return NO_INIT;
    return aps->getInputForAttr(
            attr, input, session, pid, uid,
            samplingRate, format, channelMask, flags, selectedDeviceId);
}

frameworks/av/media/libmedia/IAudioPolicyService.cpp

Focus on aps: it is an IAudioPolicyService proxy, so this is another Binder call:

virtual status_t getInputForAttr(const audio_attributes_t *attr, ...)
{
    status_t status = remote()->transact(GET_INPUT_FOR_ATTR, data, &reply);
}

frameworks/av/services/audiopolicy/service/AudioPolicyService.h

class AudioPolicyService :
    public BinderService<AudioPolicyService>,
    public BnAudioPolicyService,

AudioPolicyService inherits BnAudioPolicyService and forwards the request to an AudioPolicyInterface; AudioPolicyManager implements AudioPolicyInterface, so we finally land in AudioPolicyManager.cpp.
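Stripped down to the relationships that matter here (purely illustrative, not the real declarations, which live in AudioPolicyService.h, AudioPolicyInterface.h and AudioPolicyManager.h):

// Illustrative sketch: the Binder stub (AudioPolicyService) forwards getInputForAttr()
// to the AudioPolicyInterface implementation it owns, which is AudioPolicyManager.
struct AudioPolicyInterface {
    virtual ~AudioPolicyInterface() {}
    virtual int getInputForAttr(/* attr, input, session, source, ... */) = 0;
};

struct AudioPolicyManager : AudioPolicyInterface {
    int getInputForAttr(/* ... */) override {
        // device = getDeviceAndMixForInputSource(inputSource, &policyMix);
        // *input  = getInputForDevice(device, ...);
        return 0;
    }
};

struct AudioPolicyService /* : BnAudioPolicyService */ {
    AudioPolicyInterface *mAudioPolicyManager;   // created when the service starts
    int getInputForAttr(/* ... */) {
        return mAudioPolicyManager->getInputForAttr(/* ... */);
    }
};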

1.4 The policy in AudioPolicyManager.cpp

frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp

device = getDeviceAndMixForInputSource(inputSource, &policyMix); 
*input = getInputForDevice(device, address, session, uid, inputSource,
                               samplingRate, format, channelMask, flags,
                               policyMix);
audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
{
    for (size_t routeIndex = 0; routeIndex < mInputRoutes.size(); routeIndex++) {
        sp<SessionRoute> route = mInputRoutes.valueAt(routeIndex);
        if (inputSource == route->mSource && route->isActive()) {
            return route->mDeviceDescriptor->type();
        }
    }

     return mEngine->getDeviceForInputSource(inputSource);
}
The concrete per-source strategy lives in Engine.cpp:

frameworks/av/services/audiopolicy/enginedefault/src/Engine.cpp

audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
{
    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
    audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;

    uint32_t device = AUDIO_DEVICE_NONE;

    switch (inputSource) {
    case AUDIO_SOURCE_VOICE_UPLINK:
      if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
          device = AUDIO_DEVICE_IN_VOICE_CALL;
          break;
      }
      break;

    case AUDIO_SOURCE_DEFAULT:
    case AUDIO_SOURCE_MIC:
    if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {  //A2DP
        device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
    } else if ((mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO) && // If bluetooth is forced, bluetooth headset is preferred
        (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
        device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) { // wired headset microphone
        device = AUDIO_DEVICE_IN_WIRED_HEADSET;
    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) { // USB device
        device = AUDIO_DEVICE_IN_USB_DEVICE;
    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) { // built-in microphone
        device = AUDIO_DEVICE_IN_BUILTIN_MIC;
    }
    break;

    case AUDIO_SOURCE_VOICE_COMMUNICATION:
        // Allow only use of devices on primary input if in call and HAL does not support routing
        // to voice call path.
        if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
                (availableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
            sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
            availableDeviceTypes = availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle())
                    & ~AUDIO_DEVICE_BIT_IN;
        }

        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
        case AUDIO_POLICY_FORCE_BT_SCO:
            // if SCO device is requested but no SCO device is available, fall back to default case
            if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
                device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
                break;
            }
            // FALL THROUGH

        default:    // FORCE_NONE
            if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
                device = AUDIO_DEVICE_IN_WIRED_HEADSET;
            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
                device = AUDIO_DEVICE_IN_USB_DEVICE;
            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
            }
            break;

        case AUDIO_POLICY_FORCE_SPEAKER:
            if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
                device = AUDIO_DEVICE_IN_BACK_MIC;
            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
            }
            break;
        }
        break;

    case AUDIO_SOURCE_VOICE_RECOGNITION:
    case AUDIO_SOURCE_UNPROCESSED:
    case AUDIO_SOURCE_HOTWORD:
        if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
                availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
            device = AUDIO_DEVICE_IN_USB_DEVICE;
        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
        }
        break;
    case AUDIO_SOURCE_CAMCORDER:
        if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
            device = AUDIO_DEVICE_IN_BACK_MIC;
        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
        }
        break;
    case AUDIO_SOURCE_VOICE_DOWNLINK:
    case AUDIO_SOURCE_VOICE_CALL:
        if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
            device = AUDIO_DEVICE_IN_VOICE_CALL;
        }
        break;
    case AUDIO_SOURCE_REMOTE_SUBMIX:
        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
            device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
        }
        break;
     case AUDIO_SOURCE_FM_TUNER:
        if (availableDeviceTypes & AUDIO_DEVICE_IN_FM_TUNER) {
            device = AUDIO_DEVICE_IN_FM_TUNER;
        }
        break;
    default:
        ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
        break;
    }
    if (device == AUDIO_DEVICE_NONE) {
        ALOGV("getDeviceForInputSource() no device found for source %d", inputSource);
        if (availableDeviceTypes & AUDIO_DEVICE_IN_STUB) {
            device = AUDIO_DEVICE_IN_STUB;
        }
        ALOGE_IF(device == AUDIO_DEVICE_NONE,
                 "getDeviceForInputSource() no default device defined");
    }
    ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
    return device;
}
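A side note on the "& ~AUDIO_DEVICE_BIT_IN" at the top of the function: input device types carry the high bit AUDIO_DEVICE_BIT_IN, and stripping it turns the set of available input devices into a plain bit mask that the per-source checks can test. A small self-contained demo; the numeric values are copied from AOSP's system/audio.h (Android 7.x), the demo itself is ours:

#include <cstdint>
#include <cstdio>

static const uint32_t AUDIO_DEVICE_BIT_IN           = 0x80000000u;
static const uint32_t AUDIO_DEVICE_IN_BUILTIN_MIC   = AUDIO_DEVICE_BIT_IN | 0x4u;
static const uint32_t AUDIO_DEVICE_IN_WIRED_HEADSET = AUDIO_DEVICE_BIT_IN | 0x10u;

int main() {
    // Both the built-in mic and a wired headset are attached.
    uint32_t availableInputTypes  = AUDIO_DEVICE_IN_BUILTIN_MIC | AUDIO_DEVICE_IN_WIRED_HEADSET;
    uint32_t availableDeviceTypes = availableInputTypes & ~AUDIO_DEVICE_BIT_IN;   // 0x00000014

    // AUDIO_SOURCE_MIC: the wired headset outranks the built-in mic (see the switch above).
    uint32_t device = (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET)
                              ? AUDIO_DEVICE_IN_WIRED_HEADSET
                              : AUDIO_DEVICE_IN_BUILTIN_MIC;
    printf("selected input device: 0x%08x\n", device);   // prints 0x80000010
    return 0;
}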
