Java Code Examples for android.media.AudioFormat#CHANNEL_IN_MONO

The following examples show how to use android.media.AudioFormat#CHANNEL_IN_MONO. Each example is taken from an open-source Android project; the source file and license are noted above the code.
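
Before the individual examples, here is a minimal sketch of the pattern most of them share: query the minimum buffer size for a mono input configuration, construct an AudioRecord with CHANNEL_IN_MONO and 16-bit PCM, then read samples once the recorder is initialized. The 16000 Hz sample rate, the MIC audio source, and the buffer sizing are illustrative choices rather than values required by the API, and the RECORD_AUDIO permission is assumed to have been granted already.

private void recordMonoSketch() {
    final int sampleRate = 16000; // illustrative; 44100 Hz is the most widely supported rate
    int minBufferSize = AudioRecord.getMinBufferSize(
            sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);

    AudioRecord recorder = new AudioRecord(
            MediaRecorder.AudioSource.MIC,
            sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            minBufferSize * 2); // a little headroom over the reported minimum

    if (recorder.getState() != AudioRecord.STATE_INITIALIZED) {
        return; // construction failed, e.g. unsupported rate or missing permission
    }

    recorder.startRecording();
    short[] buffer = new short[minBufferSize / 2]; // shorts, since the encoding is 16-bit PCM
    int read = recorder.read(buffer, 0, buffer.length); // blocking read of mono samples
    // ... hand 'read' samples to whatever consumes the audio ...
    recorder.stop();
    recorder.release();
}
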
Example 1
Source File: RecordingSampler.java    From voice-recording-visualizer with Apache License 2.0
private void initAudioRecord() {
    int bufferSize = AudioRecord.getMinBufferSize(
            RECORDING_SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT
    );

    mAudioRecord = new AudioRecord(
            MediaRecorder.AudioSource.MIC,
            RECORDING_SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSize
    );

    if (mAudioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
        mBufSize = bufferSize;
    }
}
 
Example 2
Source File: MicrophoneSource.java    From media-for-mobile with Apache License 2.0
public synchronized void configure(int sampleRate, int channels) {
    this.sampleRate = sampleRate;
    recordChannels = channels;

    switch (recordChannels) {
        case 1: {
            androidChannels = AudioFormat.CHANNEL_IN_MONO;
        }
        break;

        case 2: {
            androidChannels = AudioFormat.CHANNEL_IN_STEREO;
        }
        break;
    }

    minBufferSize = AudioRecord.getMinBufferSize(sampleRate, androidChannels, audioEncoding);

    if (minBufferSize < 0) {
        this.sampleRate = 8000;
        minBufferSize = AudioRecord.getMinBufferSize(sampleRate, androidChannels, audioEncoding);
    }
}
 
Example 3
Source File: WavFileHelper.java    From video-quickstart-android with MIT License
private int getChannelMask(int channels) {
    switch (channels) {
        case 1:
            return AudioFormat.CHANNEL_IN_MONO;
        case 2:
            return AudioFormat.CHANNEL_IN_STEREO;
    }
    return AudioFormat.CHANNEL_IN_STEREO;
}
 
Example 4
Source File: AudioSaveHelper.java    From Android-AudioRecorder-App with Apache License 2.0
/**
 * Writes the proper 44-byte RIFF/WAVE header to/for the given stream.
 * Two size fields are left empty since we do not yet know the final stream size.
 *
 * @param out The stream to write the header to
 * @param channelMask An AudioFormat.CHANNEL_* mask
 * @param sampleRate The sample rate in hertz
 * @param encoding An AudioFormat.ENCODING_PCM_* value
 * @throws IOException if writing to the stream fails
 */
private void writeWavHeader(OutputStream out, int channelMask, int sampleRate, int encoding)
    throws IOException {
  short channels;
  switch (channelMask) {
    case AudioFormat.CHANNEL_IN_MONO:
      channels = 1;
      break;
    case AudioFormat.CHANNEL_IN_STEREO:
      channels = 2;
      break;
    default:
      throw new IllegalArgumentException("Unacceptable channel mask");
  }

  short bitDepth;
  switch (encoding) {
    case AudioFormat.ENCODING_PCM_8BIT:
      bitDepth = 8;
      break;
    case AudioFormat.ENCODING_PCM_16BIT:
      bitDepth = 16;
      break;
    case AudioFormat.ENCODING_PCM_FLOAT:
      bitDepth = 32;
      break;
    default:
      throw new IllegalArgumentException("Unacceptable encoding");
  }

  writeWavHeader(out, channels, sampleRate, bitDepth);
}
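
The header written above deliberately leaves the RIFF chunk size and the data chunk size empty. Below is a sketch of how those two fields might be patched once recording has finished and the final file length is known; updateWavHeaderSizes is a hypothetical helper (using java.io.RandomAccessFile and java.nio.ByteBuffer) and is not part of the original AudioSaveHelper source.

private void updateWavHeaderSizes(File wavFile) throws IOException {
    long dataSize = wavFile.length() - 44;       // PCM payload after the 44-byte header
    ByteBuffer sizes = ByteBuffer.allocate(8)
            .order(ByteOrder.LITTLE_ENDIAN)
            .putInt((int) (dataSize + 36))       // RIFF chunk size = total file size - 8
            .putInt((int) dataSize);             // 'data' chunk size
    try (RandomAccessFile wav = new RandomAccessFile(wavFile, "rw")) {
        wav.seek(4);                             // RIFF size field lives at byte offset 4
        wav.write(sizes.array(), 0, 4);
        wav.seek(40);                            // data size field lives at byte offset 40
        wav.write(sizes.array(), 4, 4);
    }
}
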
 
Example 5
Source File: SpeechRecord.java    From speechutils with Apache License 2.0
public SpeechRecord(int sampleRateInHz, int bufferSizeInBytes)
        throws IllegalArgumentException {

    this(
            MediaRecorder.AudioSource.VOICE_RECOGNITION,
            sampleRateInHz,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSizeInBytes,
            false,
            false,
            false
    );
}
 
Example 6
Source File: WavRecorderActivity.java    From OmRecorder with Apache License 2.0
private PullableSource mic() {
  return new PullableSource.Default(
      new AudioRecordConfig.Default(
          MediaRecorder.AudioSource.MIC, AudioFormat.ENCODING_PCM_16BIT,
          AudioFormat.CHANNEL_IN_MONO, 44100
      )
  );
}
 
Example 7
Source File: RecordDialog.java    From RecordDialog with MIT License
private PullableSource mic() {
    return new PullableSource.Default(
            new AudioRecordConfig.Default(
                    MediaRecorder.AudioSource.MIC, AudioFormat.ENCODING_PCM_16BIT,
                    AudioFormat.CHANNEL_IN_MONO, 44100
            )
    );
}
 
Example 8
Source File: SaiyRecorder.java    From Saiy-PS with GNU Affero General Public License v3.0
/**
 * Constructor
 * <p>
 * Uses the most common application defaults
 */
public SaiyRecorder() {
    this.audioSource = MediaRecorder.AudioSource.VOICE_RECOGNITION;
    this.sampleRateInHz = 8000;
    this.channelConfig = AudioFormat.CHANNEL_IN_MONO;
    this.audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    this.bufferSizeInBytes = calculateBufferSize();
    this.enhance = true;
}
 
Example 9
Source File: SpeechRecord.java    From AlexaAndroid with GNU General Public License v2.0
public SpeechRecord(int sampleRateInHz, int bufferSizeInBytes, boolean noise, boolean gain, boolean echo)
        throws IllegalArgumentException {

    this(
            MediaRecorder.AudioSource.VOICE_RECOGNITION,
            sampleRateInHz,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSizeInBytes,
            noise,
            gain,
            echo
    );
}
 
Example 10
Source File: WebRtcAudioManager.java    From webrtc_android with MIT License
private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
  final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
  final int channelConfig =
      (numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
  return AudioRecord.getMinBufferSize(
             sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
      / bytesPerFrame;
}
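
For concreteness, with illustrative numbers: if numChannels is 1 and BITS_PER_SAMPLE is 16 (consistent with the ENCODING_PCM_16BIT used here), bytesPerFrame is 2, so a getMinBufferSize result of 3840 bytes is reported as 1920 mono frames.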
 
Example 11
Source File: SpeechSendVoice.java    From AlexaAndroid with GNU General Public License v2.0
/**
 * The trigger to open a new AudioRecord and start recording, with the intention of sending the audio to the AVS server via stopRecord(). This will have permission
 * issues on Marshmallow that need to be handled at the Activity level (checking for permission to record audio, and requesting it if we don't already have it).
 * @param url our POST url
 * @param accessToken our user's access token
 * @param buffer a byte[] that allows us to prepend whatever audio is recorded by the user with either generated or pre-recorded audio; this needs
 *               to be in the same format as the audio being recorded
 * @param callback our callback to notify us when we change states
 * @throws IOException if writing the prepended buffer to the output stream fails
 *
 * @deprecated Manage this state on the application side instead, and send the audio using {@link SpeechSendAudio}
 */
@Deprecated
public void startRecording(final String url, final String accessToken, @Nullable byte[] buffer,
                           @Nullable final AsyncCallback<Void, Exception> callback) throws IOException {
    synchronized(mLock) {
        mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, AUDIO_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, BUFFER_SIZE);
    }

    if(callback != null){
        callback.start();
    }

    mCallback = callback;
    mIsRecording = true;
    new AsyncTask<Void, Void, Void>() {
        @Override
        protected Void doInBackground(Void... params) {
            synchronized(mLock) {
                prepareConnection(url, accessToken);
            }
            return null;
        }
    }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);

    if(buffer != null){
        mOutputStream.write(buffer);
    }

    //record our audio
    recordAudio(mAudioRecord, mOutputStream);
}
 
Example 12
Source File: MainActivity.java    From SimpleRecorder with Apache License 2.0
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);

    if (Build.VERSION.SDK_INT >= 23) {
        requestPermissions(perms, REQUEST_CODE);
    }

    ImageButton mImageButton = (ImageButton) findViewById(R.id.action_image);
    mImageButton.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            if (mRecorder == null || !mRecorder.isInitialized()) {
                return;
            }
            boolean recording = mRecorder.isRecording();
            if (recording) {
                ((ImageButton) v).setImageResource(R.drawable.record);
                mRecorder.stop();
            } else {
                ((ImageButton) v).setImageResource(R.drawable.pause);
                mRecorder.startRecording();
            }
        }
    });

    boolean result = createOutputFile();
    if (!result) {
        Toast.makeText(this, "Failed to create the output file", Toast.LENGTH_SHORT).show();
    }

    mRecorder = new Recorder(44100,
            AudioFormat.CHANNEL_IN_MONO /* channel config: mono or stereo */,
            AudioFormat.ENCODING_PCM_16BIT /* sample format */,
            MediaRecorder.AudioSource.MIC /* AudioSource */,
            NUM_SAMPLES /* period */,
            this /* onDataChangeListener */);

}
 
Example 13
Source File: MicRecorder.java    From ScreenCapture with MIT License
MicRecorder(AudioEncodeConfig config) {
    mEncoder = new AudioEncoder(config);
    mSampleRate = config.sampleRate;
    mChannelsSampleRate = mSampleRate * config.channelCount;
    if (VERBOSE) Log.i(TAG, "in bitrate " + mChannelsSampleRate * 16 /* PCM_16BIT*/);
    mChannelConfig = config.channelCount == 2 ? AudioFormat.CHANNEL_IN_STEREO : AudioFormat.CHANNEL_IN_MONO;
    mRecordThread = new HandlerThread(TAG);
}
 
Example 14
Source File: MainActivity.java    From android-fskmodem with GNU General Public License v3.0
@Override
protected void onCreate(Bundle savedInstanceState) {
	super.onCreate(savedInstanceState);
	setContentView(R.layout.activity_main);
	
	/// INIT FSK CONFIG
	
	try {
		mConfig = new FSKConfig(FSKConfig.SAMPLE_RATE_44100, FSKConfig.PCM_16BIT, FSKConfig.CHANNELS_MONO, FSKConfig.SOFT_MODEM_MODE_4, FSKConfig.THRESHOLD_20P);
	} catch (IOException e1) {
		e1.printStackTrace();
	}

	/// INIT FSK DECODER
	
	mDecoder = new FSKDecoder(mConfig, new FSKDecoderCallback() {
		
		@Override
		public void decoded(byte[] newData) {
			
			final String text = new String(newData);
			
			runOnUiThread(new Runnable() {
				public void run() {
					
					TextView view = ((TextView) findViewById(R.id.result));
					
					view.setText(view.getText()+text);
				}
			});
		}
	});
	
	///
	
	//make sure that the settings of the recorder match the settings of the decoder
	//most devices can't record anything but 44100 Hz samples in 16-bit PCM format...
	mBufferSize = AudioRecord.getMinBufferSize(FSKConfig.SAMPLE_RATE_44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
	
	//scale up the buffer... reading larger amounts of data
	//minimizes the chance of missing data because of thread priority
	mBufferSize *= 10;
	
	//again, make sure the recorder settings match the decoder settings
	mRecorder = new AudioRecord(AudioSource.MIC, FSKConfig.SAMPLE_RATE_44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mBufferSize);

	if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
		mRecorder.startRecording();
		
		//start a thread to read the audio data
		Thread thread = new Thread(mRecordFeed);
		thread.setPriority(Thread.MAX_PRIORITY);
		thread.start();
	}
	else {
		Log.i("FSKDecoder", "Please check the recorder settings, something is wrong!");
	}
}
 
Example 15
Source File: WebRtcAudioRecord.java    From webrtc_android with MIT License
private int channelCountToConfiguration(int channels) {
  return (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
}
 
Example 16
Source File: SoundFile.java    From YTPlayer with GNU General Public License v3.0
private void RecordAudio() {
    if (mProgressListener ==  null) {
        // A progress listener is mandatory here, as it will let us know when to stop recording.
        return;
    }
    mInputFile = null;
    mFileType = "raw";
    mFileSize = 0;
    mSampleRate = 44100;
    mChannels = 1;  // record mono audio.
    short[] buffer = new short[1024];  // buffer holds one chunk of 1024 16-bit mono samples
    int minBufferSize = AudioRecord.getMinBufferSize(
            mSampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // make sure minBufferSize can contain at least 1 second of audio (16-bit samples).
    if (minBufferSize < mSampleRate * 2) {
        minBufferSize = mSampleRate * 2;
    }
    AudioRecord audioRecord = new AudioRecord(
            MediaRecorder.AudioSource.DEFAULT,
            mSampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            minBufferSize
            );

    // Allocate memory for 20 seconds first. Reallocate later if more is needed.
    mDecodedBytes = ByteBuffer.allocate(20 * mSampleRate * 2);
    mDecodedBytes.order(ByteOrder.LITTLE_ENDIAN);
    mDecodedSamples = mDecodedBytes.asShortBuffer();
    audioRecord.startRecording();
    while (true) {
        // check if mDecodedSamples can contain 1024 additional samples.
        if (mDecodedSamples.remaining() < 1024) {
            // Try to allocate memory for 10 additional seconds.
            int newCapacity = mDecodedBytes.capacity() + 10 * mSampleRate * 2;
            ByteBuffer newDecodedBytes = null;
            try {
                newDecodedBytes = ByteBuffer.allocate(newCapacity);
            } catch (OutOfMemoryError oome) {
                break;
            }
            int position = mDecodedSamples.position();
            mDecodedBytes.rewind();
            newDecodedBytes.put(mDecodedBytes);
            mDecodedBytes = newDecodedBytes;
            mDecodedBytes.order(ByteOrder.LITTLE_ENDIAN);
            mDecodedBytes.rewind();
            mDecodedSamples = mDecodedBytes.asShortBuffer();
            mDecodedSamples.position(position);
        }
        // TODO(nfaralli): maybe use the read method that takes a direct ByteBuffer argument.
        audioRecord.read(buffer, 0, buffer.length);
        mDecodedSamples.put(buffer);
        // Let the progress listener know how many seconds have been recorded.
        // The returned value tells us if we should keep recording or stop.
        if (!mProgressListener.reportProgress(
                (float)(mDecodedSamples.position()) / mSampleRate)) {
            break;
        }
    }
    audioRecord.stop();
    audioRecord.release();
    mNumSamples = mDecodedSamples.position();
    mDecodedSamples.rewind();
    mDecodedBytes.rewind();
    mAvgBitRate = mSampleRate * 16 / 1000;

    // Temporary hack to make it work with the old version.
    mNumFrames = mNumSamples / getSamplesPerFrame();
    if (mNumSamples % getSamplesPerFrame() != 0){
        mNumFrames++;
    }
    mFrameGains = new int[mNumFrames];
    mFrameLens = null;  // not needed for recorded audio
    mFrameOffsets = null;  // not needed for recorded audio
    int i, j;
    int gain, value;
    for (i=0; i<mNumFrames; i++){
        gain = -1;
        for(j=0; j<getSamplesPerFrame(); j++) {
            if (mDecodedSamples.remaining() > 0) {
                value = Math.abs(mDecodedSamples.get());
            } else {
                value = 0;
            }
            if (gain < value) {
                gain = value;
            }
        }
        mFrameGains[i] = (int) Math.sqrt(gain);  // here gain = sqrt(max value of 1st channel)...
    }
    mDecodedSamples.rewind();
    // DumpSamples();  // Uncomment this line to dump the samples in a TSV file.
}
 
Example 17
Source File: RNAudioRecordModule.java    From react-native-audio-record with MIT License
private void addWavHeader(FileOutputStream out, long totalAudioLen, long totalDataLen)
        throws Exception {

    long sampleRate = sampleRateInHz;
    int channels = channelConfig == AudioFormat.CHANNEL_IN_MONO ? 1 : 2;
    int bitsPerSample = audioFormat == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16;
    long byteRate =  sampleRate * channels * bitsPerSample / 8;
    int blockAlign = channels * bitsPerSample / 8;

    byte[] header = new byte[44];

    header[0] = 'R';                                    // RIFF chunk
    header[1] = 'I';
    header[2] = 'F';
    header[3] = 'F';
    header[4] = (byte) (totalDataLen & 0xff);           // how big is the rest of this file
    header[5] = (byte) ((totalDataLen >> 8) & 0xff);
    header[6] = (byte) ((totalDataLen >> 16) & 0xff);
    header[7] = (byte) ((totalDataLen >> 24) & 0xff);
    header[8] = 'W';                                    // WAVE chunk
    header[9] = 'A';
    header[10] = 'V';
    header[11] = 'E';
    header[12] = 'f';                                   // 'fmt ' chunk
    header[13] = 'm';
    header[14] = 't';
    header[15] = ' ';
    header[16] = 16;                                    // 4 bytes: size of 'fmt ' chunk
    header[17] = 0;
    header[18] = 0;
    header[19] = 0;
    header[20] = 1;                                     // format = 1 for PCM
    header[21] = 0;
    header[22] = (byte) channels;                       // mono or stereo
    header[23] = 0;
    header[24] = (byte) (sampleRate & 0xff);            // samples per second
    header[25] = (byte) ((sampleRate >> 8) & 0xff);
    header[26] = (byte) ((sampleRate >> 16) & 0xff);
    header[27] = (byte) ((sampleRate >> 24) & 0xff);
    header[28] = (byte) (byteRate & 0xff);              // bytes per second
    header[29] = (byte) ((byteRate >> 8) & 0xff);
    header[30] = (byte) ((byteRate >> 16) & 0xff);
    header[31] = (byte) ((byteRate >> 24) & 0xff);
    header[32] = (byte) blockAlign;                     // bytes in one sample, for all channels
    header[33] = 0;
    header[34] = (byte) bitsPerSample;                  // bits in a sample
    header[35] = 0;
    header[36] = 'd';                                   // beginning of the data chunk
    header[37] = 'a';
    header[38] = 't';
    header[39] = 'a';
    header[40] = (byte) (totalAudioLen & 0xff);         // how big is this data chunk
    header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
    header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
    header[43] = (byte) ((totalAudioLen >> 24) & 0xff);

    out.write(header, 0, 44);
}
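
The two length arguments are related: totalDataLen is everything after the first 8 header bytes, i.e. the raw PCM payload plus 36. A sketch of a call site is below; rawPcmFile, wavPath and wavOut are hypothetical names for the temporary PCM file, the destination path and the destination stream.

long totalAudioLen = rawPcmFile.length();       // raw 16-bit PCM payload in bytes
long totalDataLen = totalAudioLen + 36;         // RIFF chunk size = payload + 44-byte header - 8
FileOutputStream wavOut = new FileOutputStream(wavPath);
addWavHeader(wavOut, totalAudioLen, totalDataLen);
// ... then append the raw PCM bytes and close both files ...
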
 
Example 18
Source File: CheckPermission.java    From EasyPhotos with Apache License 2.0
/**
 * Checks whether the app has permission to record audio.
 *
 * @return STATE_NO_PERMISSION, STATE_RECORDING or STATE_SUCCESS
 */
public static int getRecordState() {
    int minBuffer = AudioRecord.getMinBufferSize(44100, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, 44100,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuffer * 100);
    short[] point = new short[minBuffer];
    int readSize = 0;
    try {
        audioRecord.startRecording(); // check whether recording can actually be started
    } catch (Exception e) {
        if (audioRecord != null) {
            audioRecord.release();
            audioRecord = null;
        }
        return STATE_NO_PERMISSION;
    }
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        // Devices below Android 6.0 always return this state, so check the build version before relying on it.
        // Check whether recording is actually in progress.
        if (audioRecord != null) {
            audioRecord.stop();
            audioRecord.release();
            audioRecord = null;
            LogUtil.d("录音机被占用");
        }
        return STATE_RECORDING;
    } else {
        // check whether we can actually read recorded data
        readSize = audioRecord.read(point, 0, point.length);

        if (readSize <= 0) {
            if (audioRecord != null) {
                audioRecord.stop();
                audioRecord.release();
                audioRecord = null;
            }
            LogUtil.d("No audio data was recorded");
            return STATE_NO_PERMISSION;
        } else {
            if (audioRecord != null) {
                audioRecord.stop();
                audioRecord.release();
                audioRecord = null;
            }
            return STATE_SUCCESS;
        }
    }
}
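
A possible call site for this probe, assuming the STATE_* constants are public members of CheckPermission (the surrounding code is illustrative):

int state = CheckPermission.getRecordState();
if (state == CheckPermission.STATE_NO_PERMISSION) {
    // ask the user for RECORD_AUDIO, e.g. via ActivityCompat.requestPermissions(...)
} else if (state == CheckPermission.STATE_RECORDING) {
    // the recorder is held by another app; try again later
} else { // STATE_SUCCESS
    // safe to create our own AudioRecord and start recording
}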