Java Code Examples for javax.sound.sampled.TargetDataLine#stop()

The following examples show how to use javax.sound.sampled.TargetDataLine#stop(). You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source File: Recognize.java — from java-docs-samples (Apache License 2.0)
/**
 * Performs microphone streaming speech recognition with a duration of 1 minute.
 *
 * <p>Captures raw PCM audio (16 kHz, 16-bit, mono) from the default microphone and streams it
 * to the Cloud Speech-to-Text API over a bidirectional gRPC stream. Transcripts of all buffered
 * responses are printed once the stream completes.
 *
 * @throws Exception if the speech client cannot be created or the audio line cannot be opened
 */
public static void streamingMicRecognize() throws Exception {

  ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
  try (SpeechClient client = SpeechClient.create()) {

    responseObserver =
        new ResponseObserver<StreamingRecognizeResponse>() {
          // Buffers every streaming response; transcripts are printed on completion.
          final ArrayList<StreamingRecognizeResponse> responses = new ArrayList<>();

          @Override
          public void onStart(StreamController controller) {}

          @Override
          public void onResponse(StreamingRecognizeResponse response) {
            responses.add(response);
          }

          @Override
          public void onComplete() {
            for (StreamingRecognizeResponse response : responses) {
              // A response may carry no results/alternatives; skip instead of throwing.
              if (response.getResultsList().isEmpty()) {
                continue;
              }
              StreamingRecognitionResult result = response.getResultsList().get(0);
              if (result.getAlternativesList().isEmpty()) {
                continue;
              }
              SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
              System.out.printf("Transcript : %s\n", alternative.getTranscript());
            }
          }

          @Override
          public void onError(Throwable t) {
            System.out.println(t);
          }
        };

    ClientStream<StreamingRecognizeRequest> clientStream =
        client.streamingRecognizeCallable().splitCall(responseObserver);

    RecognitionConfig recognitionConfig =
        RecognitionConfig.newBuilder()
            .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    StreamingRecognitionConfig streamingRecognitionConfig =
        StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();

    // The first request in a streaming call must carry only the configuration.
    StreamingRecognizeRequest request =
        StreamingRecognizeRequest.newBuilder()
            .setStreamingConfig(streamingRecognitionConfig)
            .build();

    clientStream.send(request);

    // SampleRate: 16000 Hz, SampleSizeInBits: 16, channels: 1, signed: true, bigEndian: false
    AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
    // Describes the line needed to read from the microphone audio stream.
    DataLine.Info targetInfo = new DataLine.Info(TargetDataLine.class, audioFormat);

    if (!AudioSystem.isLineSupported(targetInfo)) {
      System.out.println("Microphone not supported");
      System.exit(0);
    }

    // Target data line captures the audio stream the microphone produces.
    TargetDataLine targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
    targetDataLine.open(audioFormat);
    targetDataLine.start();
    System.out.println("Start speaking");
    long startTime = System.currentTimeMillis();
    try (AudioInputStream audio = new AudioInputStream(targetDataLine)) {
      while (true) {
        long estimatedTime = System.currentTimeMillis() - startTime;
        if (estimatedTime > 60000) { // 60 seconds
          System.out.println("Stop speaking.");
          break;
        }
        byte[] data = new byte[6400];
        // read() may return fewer bytes than requested; send only what was actually read.
        int bytesRead = audio.read(data);
        if (bytesRead < 0) { // end of stream (line was closed)
          break;
        }
        request =
            StreamingRecognizeRequest.newBuilder()
                .setAudioContent(ByteString.copyFrom(data, 0, bytesRead))
                .build();
        clientStream.send(request);
      }
    } finally {
      // Always release the microphone, even if sending a request failed.
      targetDataLine.stop();
      targetDataLine.close();
    }
    // Half-close the stream so the server knows no more audio is coming.
    clientStream.closeSend();
  } catch (Exception e) {
    System.out.println(e);
  }
  // Guard against SpeechClient.create() having thrown before the observer was assigned.
  if (responseObserver != null) {
    responseObserver.onComplete();
  }
}
 
Example 2
Source File: bug6372428.java — from openjdk-jdk9 (GNU General Public License v2.0)
/**
 * Regression check: records from a {@code TargetDataLine} for {@code DATA_LENGTH} seconds while
 * monitoring the line's reported position, and fails if that position ever moves backwards.
 *
 * <p>Relies on sibling members of this test class not shown here: {@code ReadThread},
 * {@code RecordThread}, {@code DATA_LENGTH}, {@code log(String)} and {@code delay(long)}.
 *
 * @throws LineUnavailableException if a matching target data line cannot be opened
 */
void testRecord() throws LineUnavailableException {
    // prepare audio data: 22.05 kHz, 8-bit, mono, unsigned, little-endian
    AudioFormat format = new AudioFormat(22050, 8, 1, false, false);

    // create & open target data line
    //TargetDataLine line = AudioSystem.getTargetDataLine(format);
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
    TargetDataLine line = (TargetDataLine)AudioSystem.getLine(info);

    line.open(format);

    // start read data thread; buffer is sized to hold DATA_LENGTH seconds of audio
    byte[] data = new byte[(int) (format.getFrameRate() * format.getFrameSize() * DATA_LENGTH)];
    ReadThread p1 = new ReadThread(line, data);
    p1.start();

    // start line (started from a separate thread; see sibling RecordThread)
    //new RecordThread(line).start();
    RecordThread p2 = new RecordThread(line);
    p2.start();

    // monitor line: poll its millisecond position until the deadline or the reader finishes
    long endTime = currentTimeMillis() + DATA_LENGTH * 1000;

    long realTime1 = currentTimeMillis();
    long lineTime1 = line.getMicrosecondPosition() / 1000;

    while (realTime1 < endTime && !p1.isCompleted()) {
        delay(100);
        long lineTime2 = line.getMicrosecondPosition() / 1000;
        long realTime2 = currentTimeMillis();
        long dLineTime = lineTime2 - lineTime1;
        long dRealTime = realTime2 - realTime1;
        log("line pos: " + lineTime2 + "ms" + ", thread is " + (p2.isAlive() ? "alive" : "DIED"));
        // The line position must be monotonically non-decreasing; a negative delta is the bug
        // this test (6372428) guards against.
        if (dLineTime < 0) {
            line.stop();
            line.close();
            throw new RuntimeException("ERROR: line position have decreased from " + lineTime1 + " to " + lineTime2);
        }
        if (dRealTime < 450) {
            // delay() has been interrupted?
            // Skip updating the reference points so deltas accumulate over a full interval.
            continue;
        }
        lineTime1 = lineTime2;
        realTime1 = realTime2;
    }
    log("stopping line...");
    line.stop();
    line.close();

    /*
    log("");
    log("");
    log("");
    log("recording completed, delaying 5 sec");
    log("recorded " + p1.getCount() + " bytes, " + DATA_LENGTH + " seconds: " + (p1.getCount() * 8 / DATA_LENGTH) + " bit/sec");
    log("");
    log("");
    log("");
    delay(5000);
    log("starting playing...");
    playRecorded(format, data);
    */
}
 
Example 3
Source File: MicrophoneWithWebSocketsExample.java — from java-sdk (Apache License 2.0)
/**
 * The main method.
 *
 * <p>Captures 30 seconds of microphone audio (16 kHz, 16-bit, signed PCM, mono) and streams it
 * to the IBM Watson Speech to Text service over a WebSocket, printing interim transcription
 * results as they arrive.
 *
 * @param args the arguments (unused)
 * @throws Exception the exception
 */
public static void main(final String[] args) throws Exception {
  Authenticator authenticator = new IamAuthenticator("<iam_api_key>");
  SpeechToText service = new SpeechToText(authenticator);

  // Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
  int sampleRate = 16000;
  AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
  DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

  if (!AudioSystem.isLineSupported(info)) {
    System.out.println("Line not supported");
    System.exit(0);
  }

  TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
  line.open(format);
  line.start();
  try {
    AudioInputStream audio = new AudioInputStream(line);

    RecognizeOptions options =
        new RecognizeOptions.Builder()
            .audio(audio)
            .interimResults(true)
            .timestamps(true)
            .wordConfidence(true)
            // .inactivityTimeout(5) // use this to stop listening when the speaker pauses, i.e.
            // for 5s
            .contentType(HttpMediaType.AUDIO_RAW + ";rate=" + sampleRate)
            .build();

    service.recognizeUsingWebSocket(
        options,
        new BaseRecognizeCallback() {
          @Override
          public void onTranscription(SpeechRecognitionResults speechResults) {
            System.out.println(speechResults);
          }
        });

    System.out.println("Listening to your voice for the next 30s...");
    Thread.sleep(30 * 1000);
  } finally {
    // Always release the microphone, even if the WebSocket call or sleep throws.
    // Closing the line ends the AudioInputStream; closing the WebSocket's underlying
    // InputStream closes the WebSocket itself.
    line.stop();
    line.close();
  }

  System.out.println("Fin.");
}