Java Code Examples for android.media.AudioFormat#ENCODING_PCM_8BIT

The following examples show how to use android.media.AudioFormat#ENCODING_PCM_8BIT . You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: MicrophoneCollector.java    From sensordatacollector with GNU General Public License v2.0 6 votes vote down vote up
/**
 * Probes sample rates, encodings and channel configurations until an
 * {@link AudioRecord} can be successfully initialized.
 *
 * @return an initialized AudioRecord, or {@code null} if no supported
 *         combination was found on this device
 */
private AudioRecord findAudioRecord()
{
    for(int rate : mSampleRates) {
        for(short audioFormat : new short[]{ AudioFormat.ENCODING_PCM_16BIT, AudioFormat.ENCODING_PCM_8BIT }) {
            for(short channelConfig : new short[]{ AudioFormat.CHANNEL_IN_STEREO, AudioFormat.CHANNEL_IN_DEFAULT, AudioFormat.CHANNEL_IN_MONO }) {
                try {
                    Log.d("MicrophoneCollector", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
                    int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);

                    if(bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        // check if we can instantiate and have a success
                        AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);

                        if(recorder.getState() == AudioRecord.STATE_INITIALIZED)
                            return recorder;

                        // Fix: release the native resources of a failed instance;
                        // otherwise every unsuccessful attempt leaks an AudioRecord.
                        recorder.release();
                    }
                } catch(Exception e) {
                    Log.e("MicrophoneCollector", rate + " Exception, keep trying.", e);
                }
            }
        }
    }
    return null;
}
 
Example 2
Source File: Microphone.java    From ssj with GNU General Public License v3.0 6 votes vote down vote up
/**
 * Maps an {@code AudioFormat.ENCODING_*} constant to the sample type used
 * by this framework.
 *
 * @param f an AudioFormat encoding constant
 * @return the matching {@code Cons.Type}, or {@code UNDEF} for invalid or
 *         unrecognized encodings
 */
public static Cons.Type audioFormatSampleType(int f)
{
    if (f == AudioFormat.ENCODING_PCM_8BIT)
    {
        return Cons.Type.CHAR;
    }
    if (f == AudioFormat.ENCODING_PCM_16BIT || f == AudioFormat.ENCODING_DEFAULT)
    {
        // the platform default encoding is treated as 16-bit PCM
        return Cons.Type.SHORT;
    }
    if (f == AudioFormat.ENCODING_PCM_FLOAT)
    {
        return Cons.Type.FLOAT;
    }
    // ENCODING_INVALID and anything unknown
    return Cons.Type.UNDEF;
}
 
Example 3
Source File: Microphone.java    From ssj with GNU General Public License v3.0 6 votes vote down vote up
/**
 * Returns the size in bytes of one sample for the given encoding.
 *
 * @param f an {@code AudioFormat.ENCODING_*} constant
 * @return 1, 2 or 4 bytes per sample; 0 for invalid or unknown encodings
 */
public static int audioFormatSampleBytes(int f)
{
    if (f == AudioFormat.ENCODING_PCM_8BIT)
    {
        return 1;
    }
    if (f == AudioFormat.ENCODING_PCM_16BIT || f == AudioFormat.ENCODING_DEFAULT)
    {
        // default encoding is treated as 16-bit PCM
        return 2;
    }
    if (f == AudioFormat.ENCODING_PCM_FLOAT)
    {
        return 4;
    }
    // ENCODING_INVALID and anything unrecognized
    return 0;
}
 
Example 4
Source File: AudioTrackPlayerImpl.java    From dcs-sdk-java with Apache License 2.0 6 votes vote down vote up
/**
 * Computes a usable minimum buffer size for an {@link AudioTrack},
 * falling back to a fixed value when the platform reports an invalid
 * or misaligned size (works around
 * "IllegalArgumentException: Invalid audio buffer size").
 *
 * @param sampleRate    sampling rate in Hz
 * @param channelConfig an {@code AudioFormat.CHANNEL_OUT_*} mask
 * @param audioFormat   an {@code AudioFormat.ENCODING_PCM_*} constant
 * @return the buffer size in bytes (also stored in {@code minBufferSize})
 */
private int getMinBufferSize(int sampleRate, int channelConfig, int audioFormat) {
    minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);

    // Derive the channel count from the configuration mask.
    int channelCount;
    if (channelConfig == AudioFormat.CHANNEL_OUT_DEFAULT      // AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
            || channelConfig == AudioFormat.CHANNEL_OUT_MONO
            || channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
        channelCount = 1;
    } else if (channelConfig == AudioFormat.CHANNEL_OUT_STEREO
            || channelConfig == AudioFormat.CHANNEL_CONFIGURATION_STEREO) {
        channelCount = 2;
    } else {
        // Unknown mask: each set bit corresponds to one channel.
        channelCount = Integer.bitCount(channelConfig);
    }

    // If the reported size is invalid or not frame-aligned, use 1152 as default.
    int frameSizeInBytes = channelCount * (audioFormat == AudioFormat.ENCODING_PCM_8BIT ? 1 : 2);
    if ((minBufferSize % frameSizeInBytes != 0) || (minBufferSize < 1)) {
        minBufferSize = 1152;
    }
    return minBufferSize;
}
 
Example 5
Source File: AudioSaveHelper.java    From Android-AudioRecorder-App with Apache License 2.0 5 votes vote down vote up
/**
 * Translates an AudioFormat channel mask and encoding into channel count and
 * bit depth, then delegates to the overload that writes the actual 44-byte
 * RIFF/WAVE header. Two size fields are left empty since the final stream
 * size is not yet known.
 *
 * @param out The stream to write the header to
 * @param channelMask An AudioFormat.CHANNEL_* mask (mono or stereo only)
 * @param sampleRate The sample rate in hertz
 * @param encoding An AudioFormat.ENCODING_PCM_* value
 * @throws IOException if writing to the stream fails
 * @throws IllegalArgumentException for unsupported masks or encodings
 */
private void writeWavHeader(OutputStream out, int channelMask, int sampleRate, int encoding)
    throws IOException {
  final short channels;
  if (channelMask == AudioFormat.CHANNEL_IN_MONO) {
    channels = 1;
  } else if (channelMask == AudioFormat.CHANNEL_IN_STEREO) {
    channels = 2;
  } else {
    throw new IllegalArgumentException("Unacceptable channel mask");
  }

  final short bitDepth;
  if (encoding == AudioFormat.ENCODING_PCM_8BIT) {
    bitDepth = 8;
  } else if (encoding == AudioFormat.ENCODING_PCM_16BIT) {
    bitDepth = 16;
  } else if (encoding == AudioFormat.ENCODING_PCM_FLOAT) {
    bitDepth = 32;
  } else {
    throw new IllegalArgumentException("Unacceptable encoding");
  }

  writeWavHeader(out, channels, sampleRate, bitDepth);
}
 
Example 6
Source File: AudioRecordConfig.java    From OmRecorder with Apache License 2.0 5 votes vote down vote up
/**
 * Returns the number of bits per sample for the configured encoding:
 * 8 for {@code ENCODING_PCM_8BIT}, otherwise 16 (16-bit PCM and any
 * unrecognized encoding alike).
 */
@Override public byte bitsPerSample() {
  return (byte) (audioEncoding == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16);
}
 
Example 7
Source File: RecordAudioTest.java    From AndPermission with Apache License 2.0 5 votes vote down vote up
/**
 * Searches for the first recording configuration the device accepts.
 *
 * @return {@code {rate, channel, format, bufferSize}} for the first
 *         combination with a valid minimum buffer size, or {@code null}
 *         if none is supported
 */
public static int[] findAudioParameters() {
    final int[] channels = {AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO};
    final int[] formats = {AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT};
    for (int rate : RATES) {
        for (int channel : channels) {
            for (int format : formats) {
                int bufferSize = AudioRecord.getMinBufferSize(rate, channel, format);
                if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                    return new int[]{rate, channel, format, bufferSize};
                }
            }
        }
    }
    // No supported combination found.
    return null;
}
 
Example 8
Source File: WavFileHelper.java    From video-quickstart-android with MIT License 5 votes vote down vote up
/**
 * Converts an AudioFormat channel mask and PCM encoding into a channel count
 * and bit depth, then delegates to the overload that emits the 44-byte
 * RIFF/WAVE header. Two size fields are left empty since the final stream
 * size is not yet known.
 *
 * @param out         The stream to write the header to
 * @param channelMask An AudioFormat.CHANNEL_* mask (mono or stereo only)
 * @param sampleRate  The sample rate in hertz
 * @param encoding    An AudioFormat.ENCODING_PCM_* value
 * @throws IOException if writing the header fails
 * @throws IllegalArgumentException for unsupported masks or encodings
 */
private static void writeWavHeader(
        OutputStream out, int channelMask, int sampleRate, int encoding) throws IOException {
    final short channels;
    if (channelMask == AudioFormat.CHANNEL_IN_MONO) {
        channels = 1;
    } else if (channelMask == AudioFormat.CHANNEL_IN_STEREO) {
        channels = 2;
    } else {
        throw new IllegalArgumentException("Unacceptable channel mask");
    }

    final short bitDepth;
    if (encoding == AudioFormat.ENCODING_PCM_8BIT) {
        bitDepth = 8;
    } else if (encoding == AudioFormat.ENCODING_PCM_16BIT) {
        bitDepth = 16;
    } else if (encoding == AudioFormat.ENCODING_PCM_FLOAT) {
        bitDepth = 32;
    } else {
        throw new IllegalArgumentException("Unacceptable encoding");
    }

    writeWavHeader(out, channels, sampleRate, bitDepth);
}
 
Example 9
Source File: MediaAudioEncoder.java    From EZFilter with MIT License 5 votes vote down vote up
/**
 * Returns the bits per sample for the given encoding: 8 for
 * {@code ENCODING_PCM_8BIT}; 16 for {@code ENCODING_PCM_16BIT} and any
 * unrecognized value.
 *
 * @param audioFormat an {@code AudioFormat.ENCODING_*} constant
 * @return 8 or 16
 */
private int getBitsPerSample(int audioFormat) {
    return audioFormat == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16;
}
 
Example 10
Source File: MediaAudioEncoder.java    From EZFilter with MIT License 5 votes vote down vote up
/**
 * Finds a usable audio recorder by probing sampling rates, encodings,
 * channel configurations and audio sources in order of preference.
 * On success, stores the chosen rate in {@code mSamplingRate}.
 *
 * @return an initialized {@link AudioRecord}, or {@code null} if no
 *         supported combination was found
 */
private AudioRecord findAudioRecord() {
    int[] samplingRates = new int[]{44100, 22050, 11025, 8000};
    int[] audioFormats = new int[]{
            AudioFormat.ENCODING_PCM_16BIT,
            AudioFormat.ENCODING_PCM_8BIT};
    int[] channelConfigs = new int[]{
            AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.CHANNEL_IN_MONO};

    for (int rate : samplingRates) {
        for (int format : audioFormats) {
            for (int config : channelConfigs) {
                try {
                    int bufferSize = AudioRecord.getMinBufferSize(rate, config, format);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        for (int source : AUDIO_SOURCES) {
                            // Buffer is oversized (x4) to tolerate scheduling jitter.
                            AudioRecord recorder = new AudioRecord(source, rate, config, format, bufferSize * 4);
                            if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                                mSamplingRate = rate;
                                return recorder;
                            }
                            // Fix: release the failed instance; otherwise each
                            // unsuccessful attempt leaks native AudioRecord resources.
                            recorder.release();
                        }
                    }
                } catch (Exception e) {
                    Log.e(TAG, "Init AudioRecord Error." + Log.getStackTraceString(e));
                }
            }
        }
    }
    return null;
}
 
Example 11
Source File: BaseAudioDecoder.java    From sdl_java_suite with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
/**
 * Captures the decoder's output format: channel count, sample rate and
 * PCM sample type. The sample type defaults to signed 16-bit and is only
 * refined from KEY_PCM_ENCODING on Android N+ (the key is unreliable on
 * older releases).
 *
 * @param mediaFormat the new output format reported by the codec
 */
protected void onOutputFormatChanged(@NonNull MediaFormat mediaFormat) {
    if (mediaFormat.containsKey(MediaFormat.KEY_CHANNEL_COUNT)) {
        outputChannelCount = mediaFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
    }

    if (mediaFormat.containsKey(MediaFormat.KEY_SAMPLE_RATE)) {
        outputSampleRate = mediaFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE);
    }

    // Fall back to signed 16-bit unless a supported PCM encoding is reported.
    int sampleType = SampleType.SIGNED_16_BIT;
    if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.N
            && mediaFormat.containsKey(MediaFormat.KEY_PCM_ENCODING)) {
        int encoding = mediaFormat.getInteger(MediaFormat.KEY_PCM_ENCODING);
        if (encoding == AudioFormat.ENCODING_PCM_8BIT) {
            sampleType = SampleType.UNSIGNED_8_BIT;
        } else if (encoding == AudioFormat.ENCODING_PCM_FLOAT) {
            sampleType = SampleType.FLOAT;
        }
        // ENCODING_PCM_16BIT and unknown values keep the 16-bit default.
    }
    outputSampleType = sampleType;
}
 
Example 12
Source File: AudioProcess.java    From NoiseCapture with GNU General Public License v3.0 5 votes vote down vote up
/**
 * Constructor. Probes recording configurations (rate, encoding, channel)
 * and locks in the first one the device reports a valid minimum buffer
 * size for, then creates the fast and slow Leq processing threads.
 *
 * @param recording Recording state
 * @param canceled Canceled state
 * @param customLeqProcessing Custom receiver of sound signals
 * @throws IllegalStateException if no probed configuration is supported
 */
public AudioProcess(AtomicBoolean recording, AtomicBoolean canceled, ProcessingThread
        customLeqProcessing) {
    this.recording = recording;
    this.canceled = canceled;
    this.customLeqProcessing = customLeqProcessing;
    final int[] mSampleRates = new int[] {44100}; // A-weighting coefficients are based on 44100
    // Hz sampling rate, so we do not support other samplings (22050, 16000, 11025,8000)
    final int[] encodings = new int[] { AudioFormat.ENCODING_PCM_16BIT , AudioFormat.ENCODING_PCM_8BIT };
    final short[] audioChannels = new short[] { AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO };
    // Try combinations in preference order; the first valid one wins.
    for (int tryRate : mSampleRates) {
        for (int tryEncoding : encodings) {
            for(short tryAudioChannel : audioChannels) {
                int tryBufferSize = AudioRecord.getMinBufferSize(tryRate,
                        tryAudioChannel, tryEncoding);
                if (tryBufferSize != AudioRecord.ERROR_BAD_VALUE) {
                    // Take a higher buffer size in order to get a smooth recording under load
                    // avoiding Buffer overflow error on AudioRecord side.
                    bufferSize = Math.max(tryBufferSize,
                            (int)(AcousticIndicators.TIMEPERIOD_FAST * tryRate));
                    // Remember the accepted configuration in instance fields.
                    encoding = tryEncoding;
                    audioChannel = tryAudioChannel;
                    rate = tryRate;
                    // Fast (short window) Leq processing thread.
                    this.fastLeqProcessing = new LeqProcessingThread(this,
                            AcousticIndicators.TIMEPERIOD_FAST, true,
                            hannWindowFast ? FFTSignalProcessing.WINDOW_TYPE.TUKEY :
                                    FFTSignalProcessing.WINDOW_TYPE.RECTANGULAR, PROP_MOVING_SPECTRUM, true);
                    // Slow (one-second window) Leq processing thread.
                    this.slowLeqProcessing = new LeqProcessingThread(this,
                            AcousticIndicators.TIMEPERIOD_SLOW, true,
                            hannWindowOneSecond ? FFTSignalProcessing.WINDOW_TYPE.TUKEY :
                                    FFTSignalProcessing.WINDOW_TYPE.RECTANGULAR,
                            PROP_DELAYED_STANDART_PROCESSING, false);
                    return;
                }
            }
        }
    }
    // Reached only if every combination was rejected by the device.
    throw new IllegalStateException("This device is not compatible");
}
 
Example 13
Source File: RecordAudioTester.java    From PermissionAgent with Apache License 2.0 5 votes vote down vote up
/**
 * Probes rates, encodings and channel configurations for a working
 * microphone recorder (used as a runtime permission check).
 *
 * @return an initialized {@link AudioRecord}, or {@code null} if no
 *         supported combination was found
 */
private static AudioRecord findAudioRecord() {
    for (int rate : RATES) {
        for (short format : new short[] {AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT}) {
            for (short channel : new short[] {AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO}) {
                int buffer = AudioRecord.getMinBufferSize(rate, channel, format);
                if (buffer != AudioRecord.ERROR_BAD_VALUE) {
                    AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC, rate, channel, format,
                        buffer);
                    if (recorder.getState() == AudioRecord.STATE_INITIALIZED) return recorder;
                    // Fix: release the failed instance; otherwise each
                    // unsuccessful attempt leaks native AudioRecord resources.
                    recorder.release();
                }
            }
        }
    }
    return null;
}
 
Example 14
Source File: MainActivity.java    From android-fskmodem with GNU General Public License v3.0 4 votes vote down vote up
@Override
protected void onCreate(Bundle savedInstanceState) {
	super.onCreate(savedInstanceState);
	setContentView(R.layout.activity_main);
	
	/// INIT FSK CONFIG
	// 8-bit PCM, mono, 44.1 kHz soft-modem configuration.
	
	try {
		mConfig = new FSKConfig(FSKConfig.SAMPLE_RATE_44100, FSKConfig.PCM_8BIT, FSKConfig.CHANNELS_MONO, FSKConfig.SOFT_MODEM_MODE_4, FSKConfig.THRESHOLD_20P);
	} catch (IOException e1) {
		e1.printStackTrace();
	}

	/// INIT FSK DECODER
	// Decoded bytes are appended to the result TextView on the UI thread.
	
	mDecoder = new FSKDecoder(mConfig, new FSKDecoderCallback() {
		
		@Override
		public void decoded(byte[] newData) {
			
			final String text = new String(newData);
			
			runOnUiThread(new Runnable() {
				public void run() {
					
					TextView view = ((TextView) findViewById(R.id.result));
					
					view.setText(view.getText()+text);
				}
			});
		}
	});
	
	/// INIT FSK ENCODER
	// Encoded PCM is both played back and looped into the decoder;
	// only one of the two buffers is populated, per mConfig.pcmFormat.
	
	mEncoder = new FSKEncoder(mConfig, new FSKEncoderCallback() {
		
		@Override
		public void encoded(byte[] pcm8, short[] pcm16) {
			if (mConfig.pcmFormat == FSKConfig.PCM_8BIT) {
				//8bit buffer is populated, 16bit buffer is null
				
				mAudioTrack.write(pcm8, 0, pcm8.length);
				
				mDecoder.appendSignal(pcm8);
			}
			else if (mConfig.pcmFormat == FSKConfig.PCM_16BIT) {
				//16bit buffer is populated, 8bit buffer is null
				
				mAudioTrack.write(pcm16, 0, pcm16.length);
				
				mDecoder.appendSignal(pcm16);
			}
		}
	});
	
	///
	// Streaming 8-bit mono playback track for the encoder output.
	
	mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
			mConfig.sampleRate, AudioFormat.CHANNEL_OUT_MONO,
			AudioFormat.ENCODING_PCM_8BIT, 1024,
			AudioTrack.MODE_STREAM);
	
	mAudioTrack.play();
	
	///
	// Background thread that feeds data to the encoder.
	
	new Thread(mDataFeeder).start();
}
 
Example 15
Source File: Record.java    From SinVoiceDemo with Apache License 2.0 4 votes vote down vote up
/**
 * Starts a blocking record loop: configures channel/encoding from the
 * instance fields, then repeatedly fills buffers supplied by mCallback
 * until the state leaves STATE_START or the callback stops providing data.
 * Runs until stopped; intended to be called off the UI thread.
 */
public void start() {
    if (STATE_STOP == mState) {
        mState = STATE_START;

        // Map the configured channel count to an AudioFormat input mask.
        switch (mChannel) {
        case CHANNEL_1:
            mChannelConfig = AudioFormat.CHANNEL_IN_MONO;
            break;
        case CHANNEL_2:
            mChannelConfig = AudioFormat.CHANNEL_IN_STEREO;
            break;
        }

        // Map the configured bit depth to a PCM encoding.
        switch (mBits) {
        case BITS_8:
            mAudioEncoding = AudioFormat.ENCODING_PCM_8BIT;
            break;

        case BITS_16:
            mAudioEncoding = AudioFormat.ENCODING_PCM_16BIT;
            break;
        }

        // NOTE(review): minBufferSize is only logged; the AudioRecord below is
        // constructed with the mBufferSize field instead — confirm intended.
        int minBufferSize = AudioRecord.getMinBufferSize(mFrequence, mChannelConfig, mAudioEncoding);
        LogHelper.d(TAG, "minBufferSize:" + minBufferSize);

        AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC, mFrequence, mChannelConfig, mAudioEncoding, mBufferSize);
        record.startRecording();
        LogHelper.d(TAG, "record start");

        if (null != mCallback) {
            if (null != mListener) {
                mListener.onStartRecord();
            }

            // Pull empty buffers from the callback, fill them from the mic,
            // and hand them back until stopped or the callback signals the end.
            while (STATE_START == mState) {
                BufferData data = mCallback.getRecordBuffer();
                if (null != data) {
                    if (null != data.byteData) {
                        int bufferReadResult = record.read(data.byteData, 0, mBufferSize);
                        data.setFilledSize(bufferReadResult);

                        mCallback.freeRecordBuffer(data);
                    } else {
                        // end of input
                        LogHelper.d(TAG, "get end input data, so stop");
                        break;
                    }
                } else {
                    LogHelper.d(TAG, "get null data");
                    break;
                }
            }

            if (null != mListener) {
                mListener.onStopRecord();
            }
        }

        // Always stop and release the native recorder before returning.
        record.stop();
        record.release();

        LogHelper.d(TAG, "record stop");
    }
}
 
Example 16
Source File: RNAudioRecordModule.java    From react-native-audio-record with MIT License 4 votes vote down vote up
/**
 * Writes a standard 44-byte PCM WAV (RIFF) header to the stream, derived
 * from the recorder's configured sample rate, channel config and encoding.
 *
 * @param out           destination stream (header is written at the current position)
 * @param totalAudioLen size in bytes of the raw PCM payload ('data' chunk)
 * @param totalDataLen  size in bytes of everything after the RIFF length field
 * @throws Exception if writing fails
 */
private void addWavHeader(FileOutputStream out, long totalAudioLen, long totalDataLen)
        throws Exception {

    long sampleRate = sampleRateInHz;
    int channels = channelConfig == AudioFormat.CHANNEL_IN_MONO ? 1 : 2;
    int bitsPerSample = audioFormat == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16;
    long byteRate =  sampleRate * channels * bitsPerSample / 8;
    int blockAlign = channels * bitsPerSample / 8;

    // Unset bytes stay zero, matching the header's required zero padding.
    byte[] header = new byte[44];

    putAscii(header, 0, "RIFF");            // RIFF chunk id
    putLeInt(header, 4, totalDataLen);      // size of the rest of this file
    putAscii(header, 8, "WAVE");            // WAVE chunk id
    putAscii(header, 12, "fmt ");           // 'fmt ' sub-chunk id
    putLeInt(header, 16, 16);               // 'fmt ' chunk size
    header[20] = 1;                         // audio format = 1 for PCM
    header[22] = (byte) channels;           // mono or stereo
    putLeInt(header, 24, sampleRate);       // samples per second
    putLeInt(header, 28, byteRate);         // bytes per second
    header[32] = (byte) blockAlign;         // bytes per frame, all channels
    header[34] = (byte) bitsPerSample;      // bits per sample
    putAscii(header, 36, "data");           // 'data' sub-chunk id
    putLeInt(header, 40, totalAudioLen);    // size of the data chunk

    out.write(header, 0, 44);
}

/** Copies the ASCII bytes of {@code s} into {@code dest} starting at {@code off}. */
private static void putAscii(byte[] dest, int off, String s) {
    for (int i = 0; i < s.length(); i++) {
        dest[off + i] = (byte) s.charAt(i);
    }
}

/** Stores the low 32 bits of {@code v} little-endian into {@code dest} at {@code off}. */
private static void putLeInt(byte[] dest, int off, long v) {
    dest[off]     = (byte) (v & 0xff);
    dest[off + 1] = (byte) ((v >> 8) & 0xff);
    dest[off + 2] = (byte) ((v >> 16) & 0xff);
    dest[off + 3] = (byte) ((v >> 24) & 0xff);
}
 
Example 17
Source File: RNAudioRecordModule.java    From react-native-audio-record with MIT License 4 votes vote down vote up
/**
 * Initializes the recorder from JS-supplied options, with defaults of
 * 44.1 kHz, mono, 16-bit PCM, VOICE_RECOGNITION source, and output file
 * "audio.wav" in the app's files directory.
 *
 * @param options ReadableMap with optional keys: sampleRate, channels,
 *                bitsPerSample, audioSource, wavFile
 */
@ReactMethod
public void init(ReadableMap options) {
    // Sampling rate in Hz; defaults to 44.1 kHz.
    sampleRateInHz = options.hasKey("sampleRate") ? options.getInt("sampleRate") : 44100;

    // Mono unless exactly two channels are requested.
    channelConfig = (options.hasKey("channels") && options.getInt("channels") == 2)
            ? AudioFormat.CHANNEL_IN_STEREO
            : AudioFormat.CHANNEL_IN_MONO;

    // 16-bit samples unless 8 bits per sample is requested.
    audioFormat = (options.hasKey("bitsPerSample") && options.getInt("bitsPerSample") == 8)
            ? AudioFormat.ENCODING_PCM_8BIT
            : AudioFormat.ENCODING_PCM_16BIT;

    audioSource = options.hasKey("audioSource")
            ? options.getInt("audioSource")
            : AudioSource.VOICE_RECOGNITION;

    String documentDirectoryPath = getReactApplicationContext().getFilesDir().getAbsolutePath();
    tmpFile = documentDirectoryPath + "/" + "temp.pcm";
    outFile = documentDirectoryPath + "/"
            + (options.hasKey("wavFile") ? options.getString("wavFile") : "audio.wav");

    isRecording = false;
    eventEmitter = reactContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter.class);

    // Oversize the recording buffer (x3) relative to the platform minimum.
    bufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    int recordingBufferSize = bufferSize * 3;
    recorder = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, recordingBufferSize);
}
 
Example 18
Source File: PlaybackSynthesisCallback.java    From android_9.0.0_r45 with Apache License 2.0 4 votes vote down vote up
/**
 * Begins a synthesis playback request: validates the audio parameters,
 * notifies the dispatcher, and enqueues a playback queue item unless the
 * request was already stopped or errored.
 *
 * @return a TextToSpeech status code (SUCCESS, ERROR, or the stop code)
 */
@Override
public int start(int sampleRateInHz, int audioFormat, int channelCount) {
    if (DBG) Log.d(TAG, "start(" + sampleRateInHz + "," + audioFormat + "," + channelCount
            + ")");
    // Unsupported encodings are only warned about, not rejected.
    if (audioFormat != AudioFormat.ENCODING_PCM_8BIT &&
        audioFormat != AudioFormat.ENCODING_PCM_16BIT &&
        audioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
        Log.w(TAG, "Audio format encoding " + audioFormat + " not supported. Please use one " +
                   "of AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT or " +
                   "AudioFormat.ENCODING_PCM_FLOAT");
    }
    mDispatcher.dispatchOnBeginSynthesis(sampleRateInHz, audioFormat, channelCount);

    int channelConfig = BlockingAudioTrack.getChannelConfig(channelCount);

    // All state checks and the enqueue happen under the state lock so that
    // stop()/error updates cannot interleave with starting playback.
    synchronized (mStateLock) {
        if (channelConfig == 0) {
            Log.e(TAG, "Unsupported number of channels :" + channelCount);
            mStatusCode = TextToSpeech.ERROR_OUTPUT;
            return TextToSpeech.ERROR;
        }
        if (mStatusCode == TextToSpeech.STOPPED) {
            if (DBG) Log.d(TAG, "stop() called before start(), returning.");
            return errorCodeOnStop();
        }
        if (mStatusCode != TextToSpeech.SUCCESS) {
            if (DBG) Log.d(TAG, "Error was raised");
            return TextToSpeech.ERROR;
        }
        // start() must not be called twice for the same request.
        if (mItem != null) {
            Log.e(TAG, "Start called twice");
            return TextToSpeech.ERROR;
        }
        SynthesisPlaybackQueueItem item = new SynthesisPlaybackQueueItem(
                mAudioParams, sampleRateInHz, audioFormat, channelCount,
                mDispatcher, mCallerIdentity, mLogger);
        mAudioTrackHandler.enqueue(item);
        mItem = item;
    }

    return TextToSpeech.SUCCESS;
}
 
Example 19
Source File: AudioStreamManagerTest.java    From sdl_java_suite with BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
/**
 * Verifies BaseAudioDecoder.onOutputFormatChanged: feeds several
 * MediaFormat combinations to a real-methods mock and checks, via
 * reflection on private fields, that channel count, sample rate and
 * sample type are captured as expected.
 */
public void testOutputFormatChanged() {
    BaseAudioDecoder mockDecoder = mock(BaseAudioDecoder.class, Mockito.CALLS_REAL_METHODS);

    try {
        // Reflect the private output fields so the results can be inspected.
        Field outputChannelCountField = BaseAudioDecoder.class.getDeclaredField("outputChannelCount");
        Field outputSampleRateField = BaseAudioDecoder.class.getDeclaredField("outputSampleRate");
        Field outputSampleTypeField = BaseAudioDecoder.class.getDeclaredField("outputSampleType");

        outputChannelCountField.setAccessible(true);
        outputSampleRateField.setAccessible(true);
        outputSampleTypeField.setAccessible(true);

        // channel count, sample rate, sample type
        int key_channel_count = 0, key_sample_rate = 1, key_sample_type = 2, key_sample_type_result = 3;
        // Each row: input channel count, sample rate, PCM encoding, expected sample type.
        int[][] tests = new int[][] {
                { 47, 42000, AudioFormat.ENCODING_PCM_8BIT, SampleType.UNSIGNED_8_BIT },
                { 2, 16000, AudioFormat.ENCODING_PCM_16BIT, SampleType.SIGNED_16_BIT },
                { 1, 22050, AudioFormat.ENCODING_PCM_FLOAT, SampleType.FLOAT },
                { 3, 48000, AudioFormat.ENCODING_INVALID, SampleType.SIGNED_16_BIT },
        };

        for (int[] test : tests) {
            int channel_count = test[key_channel_count];
            int sample_rate = test[key_sample_rate];
            int sample_type = test[key_sample_type];
            int sample_type_result = test[key_sample_type_result];

            MediaFormat format = new MediaFormat();

            format.setInteger(MediaFormat.KEY_CHANNEL_COUNT, channel_count);
            format.setInteger(MediaFormat.KEY_SAMPLE_RATE, sample_rate);
            format.setInteger(MediaFormat.KEY_PCM_ENCODING, sample_type);

            // in case the phone version is old the method does not take sample type into account but
            // always expected 16 bit. See https://developer.android.com/reference/android/media/MediaFormat.html#KEY_PCM_ENCODING
            if (android.os.Build.VERSION.SDK_INT < android.os.Build.VERSION_CODES.N) {
                sample_type_result = SampleType.SIGNED_16_BIT;
            }

            mockDecoder.onOutputFormatChanged(format);

            int output_channel_count = outputChannelCountField.getInt(mockDecoder);
            int output_sample_rate = outputSampleRateField.getInt(mockDecoder);
            int output_sample_type = outputSampleTypeField.getInt(mockDecoder);

            // changing from assertEquals to if and fail so travis gives better results

            if (channel_count != output_channel_count) {
                fail("AssertEqualsFailed: channel_count == output_channel_count (" + channel_count + " == " + output_channel_count + ")");
            }

            if (sample_rate != output_sample_rate) {
                fail("AssertEqualsFailed: sample_rate == output_sample_rate (" + sample_rate + " == " + output_sample_rate + ")");
            }

            if (sample_type_result != output_sample_type) {
                fail("Assert: sample_type_result == output_sample_type (" + sample_type_result + " == " + output_sample_type + ")");
            }
        }
    } catch (Exception e) {
        // Reflection or invocation failure is itself a test failure.
        e.printStackTrace();
        fail();
    }
}
 
Example 20
Source File: AudioQuality.java    From DeviceConnect-Android with MIT License 3 votes vote down vote up
/**
 * Sets the audio sample format.
 *
 * <p>
 * The default is {@link AudioFormat#ENCODING_PCM_16BIT}.
 * </p>
 *
 * @param format {@link AudioFormat#ENCODING_PCM_16BIT} or {@link AudioFormat#ENCODING_PCM_8BIT}
 * @throws IllegalArgumentException if the format is not one of the two supported values
 */
public void setFormat(int format) {
    boolean supported = format == AudioFormat.ENCODING_PCM_16BIT
            || format == AudioFormat.ENCODING_PCM_8BIT;
    if (!supported) {
        throw new IllegalArgumentException("Not supported a format. format=" + format);
    }
    mFormat = format;
}