当前位置: 首页 > 后端技术 > Python

Python语音识别

时间:2023-03-26 17:56:29 Python

# -*- coding: utf-8 -*-
# Reconstructed from a web-scraped tutorial: real-time speech recognition with
# the iFlytek (科大讯飞) dictation WebSocket API.  The scrape had stripped all
# whitespace and machine-translated several Python tokens into Chinese
# (e.g. `ensure_ascii=假`, `缩进=4`, `wsParam。商业Args`); those are restored below.
#
# Project layout described by the article:
#   asr_SDK/
#   ├─ audios/input.wav   recorded audio (overwritten on each take)
#   ├─ get_audio.py       microphone recording  (File 1 below)
#   └─ iat_demo.py        WebSocket dictation client (File 2 below)
#
# NOTE(review): unused scrape leftovers were dropped (imports of requests/re/
# wave/pyaudio in iat_demo, constants framerate/NUM_SAMPLES/channels/sampwidth/
# TIME, and `type = sys.getfilesystemencoding()` which shadowed a builtin).

# ---------------------------------------------------------------------------
# File 1: get_audio.py — record ~5 s of speech from the microphone.
# ---------------------------------------------------------------------------
import pyaudio  # third-party: install a matching wheel for your Python
import wave

in_path = "./audios/input.wav"  # where the recording is saved


def get_audio(filepath):
    """Prompt the user and record ~5 s of 16-bit mono audio to *filepath*.

    Recurses on invalid input so the user can retry; each new take
    overwrites the previous WAV file.  Answering "n"/"No" exits.
    """
    answer = str(input("Start recording? (y/n)"))
    if answer == "y":
        CHUNK = 1024
        FORMAT = pyaudio.paInt16
        CHANNELS = 1            # mono
        # FIX: the dictation request declares "audio/L16;rate=16000", so record
        # at 16 kHz (the article used 11025 Hz, mismatching the declared rate).
        RATE = 16000
        RECORD_SECONDS = 5      # recording length in seconds
        recorder = pyaudio.PyAudio()
        stream = recorder.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                               input=True, frames_per_buffer=CHUNK)
        print("*" * 5, "开始录音:请在5秒内输入语音", "*" * 5)
        frames = []
        for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
            frames.append(stream.read(CHUNK))
        print("*" * 5, "录音结束\n")
        stream.stop_stream()
        stream.close()
        recorder.terminate()
        wf = wave.open(filepath, 'wb')
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(recorder.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
        wf.close()
    elif answer in ("n", "No"):
        # FIX: the prompt says "(y/n)" but the original only exited on "No";
        # accept both spellings.
        exit()
    else:
        print("录音失败,请重新开始")
        get_audio(filepath)


if __name__ == "__main__":
    # FIX: guarded so that `from get_audio import get_audio` (done by
    # iat_demo.py) no longer starts a recording as an import side effect.
    get_audio(in_path)


# ---------------------------------------------------------------------------
# File 2: iat_demo.py — stream the recording to iFlytek's dictation WebSocket
# endpoint (wss://ws-api.xfyun.cn/v2/iat) and print the recognised text.
# A single streamed request supports at most 60 s of audio.
# ---------------------------------------------------------------------------
import base64
import hashlib
import hmac
import json
import logging
import os
import ssl
import time
from datetime import datetime
from time import mktime
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time

import websocket  # third-party: websocket-client

from get_audio import get_audio

# Run relative paths ("./audios/...") from the script's own directory.
path_pwd = os.path.split(os.path.realpath(__file__))[0]
os.chdir(path_pwd)

try:
    import thread             # Python 2
except ImportError:
    import _thread as thread  # Python 3

logging.basicConfig()

# Frame markers for the streaming protocol.
STATUS_FIRST_FRAME = 0     # first audio frame (carries business params)
STATUS_CONTINUE_FRAME = 1  # intermediate frame
STATUS_LAST_FRAME = 2      # final frame


class Ws_Param(object):
    """Holds the credentials/request arguments and builds the signed URL.

    NOTE(review): the APPID/APIKey/APISecret below are the article author's
    published demo credentials; replace them with your own from the iFlytek
    console (我的应用 -> 语音听写(流式版)).
    """

    def __init__(self, host):
        self.Host = host
        self.HttpProto = "HTTP/1.1"
        self.HttpMethod = "GET"
        self.RequestUri = "/v2/iat"
        self.APPID = "5d312675"          # from console: My应用 -> 语音听写(流式版)
        self.Algorithm = "hmac-sha256"
        self.url = "wss://" + self.Host + self.RequestUri
        # Record the audio that will be streamed (max 60 s per request;
        # longer audio causes timeouts/errors).
        get_audio("./audios/input.wav")
        self.AudioFile = r"./audios/input.wav"
        self.CommonArgs = {"app_id": self.APPID}
        self.BusinessArgs = {"domain": "iat", "language": "zh_cn",
                             "accent": "mandarin"}

    def create_url(self):
        """Return the wss:// URL carrying the RFC-1123 date and the
        HMAC-SHA256 signature over the host/date/request-line headers."""
        url = 'wss://ws-api.xfyun.cn/v2/iat'
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))
        APIKey = 'a6aabfcca4ae28f9b6a448f705b7e432'
        APISecret = 'e649956e14eeb085d1b0dce77a671131'
        # FIX: this block was corrupted in the scrape; restored to the
        # canonical signature-origin form required by the API.
        signature_origin = "host: " + "ws-api.xfyun.cn" + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + "/v2/iat " + "HTTP/1.1"
        signature_sha = hmac.new(APISecret.encode('utf-8'),
                                 signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()
        signature_sha = base64.b64encode(signature_sha).decode('utf-8')
        authorization_origin = (
            'api_key="%s", algorithm="%s", headers="%s", signature="%s"'
            % (APIKey, "hmac-sha256", "host date request-line", signature_sha))
        authorization = base64.b64encode(
            authorization_origin.encode('utf-8')).decode('utf-8')
        v = {"authorization": authorization, "date": date,
             "host": "ws-api.xfyun.cn"}
        return url + '?' + urlencode(v)


def on_message(ws, message):
    """Parse one recognition-result message and print a short summary."""
    msg = json.loads(message)
    try:
        code = msg["code"]
        sid = msg["sid"]
        if code != 0:
            errMsg = msg["message"]
            print("sid:%s call error:%s code is:%s\n" % (sid, errMsg, code))
        else:
            result = msg["data"]["result"]["ws"]
            # FIX: keyword arguments were machine-translated in the scrape
            # (ensure_ascii=假, 缩进=4, 分隔符=...); restored here.
            data_result = json.dumps(result, ensure_ascii=False,
                                     sort_keys=True, indent=4,
                                     separators=(',', ':'))
            print("sid:%s call success!" % sid)
            print("result is:%s\n" % data_result)
    except Exception as e:
        print("receive msg, but parse exception:", e)


def on_error(ws, error):
    """WebSocket error callback."""
    print("### error:", error)


def on_close(ws, *args):
    """WebSocket close callback (*args absorbs the close code/reason that
    newer websocket-client versions also pass)."""
    print("### closed ###")


def on_open(ws):
    """On connect: stream the WAV file in 1280-byte frames from a thread."""
    def run(*args):
        frameSize = 1280   # bytes of audio per frame
        interval = 0.04    # seconds between frames (simulated sampling gap)
        status = STATUS_FIRST_FRAME
        with open(wsParam.AudioFile, "rb") as fp:
            while True:
                buf = fp.read(frameSize)
                if not buf:  # end of file -> emit the final frame
                    status = STATUS_LAST_FRAME
                if status == STATUS_FIRST_FRAME:
                    # First frame must include app_id and business params.
                    d = {"common": wsParam.CommonArgs,
                         "business": wsParam.BusinessArgs,
                         "data": {"status": 0,
                                  "format": "audio/L16;rate=16000",
                                  "audio": str(base64.b64encode(buf), 'utf-8'),
                                  "encoding": "raw"}}
                    ws.send(json.dumps(d))
                    status = STATUS_CONTINUE_FRAME
                elif status == STATUS_CONTINUE_FRAME:
                    d = {"data": {"status": 1,
                                  "format": "audio/L16;rate=16000",
                                  "audio": str(base64.b64encode(buf), 'utf-8'),
                                  "encoding": "raw"}}
                    ws.send(json.dumps(d))
                elif status == STATUS_LAST_FRAME:
                    # FIX: this frame's keys were machine-translated in the
                    # scrape ("编码":"原始"); restored to "encoding":"raw".
                    d = {"data": {"status": 2,
                                  "format": "audio/L16;rate=16000",
                                  "audio": str(base64.b64encode(buf), 'utf-8'),
                                  "encoding": "raw"}}
                    ws.send(json.dumps(d))
                    time.sleep(1)
                    break
                time.sleep(interval)
        ws.close()

    thread.start_new_thread(run, ())


if __name__ == "__main__":
    wsParam = Ws_Param("ws-api.xfyun.cn")  # streaming dictation host
    websocket.enableTrace(False)
    wsUrl = wsParam.create_url()
    ws = websocket.WebSocketApp(wsUrl, on_message=on_message,
                                on_error=on_error, on_close=on_close)
    ws.on_open = on_open
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})

# Usage (from the article): open iat_demo.py with IDLE (Edit with IDLE 3.7)
# and press F5 to run; install any missing third-party packages if prompted.

最新推荐
猜你喜欢