Sync from bytedesk-private: update

This commit is contained in:
jack ning
2024-12-14 10:43:18 +08:00
parent 476eebb101
commit 5e082909e4
3421 changed files with 812709 additions and 0 deletions

View File

@@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
###
### Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights
### Reserved. MIT License (https://opensource.org/licenses/MIT)
###
### 2022-2023 by zhaoming,mali aihealthx.com
from flask import Flask, render_template, request, send_from_directory, jsonify, redirect, url_for
# from gevent.pywsgi import WSGIServer
import datetime
import random
import string
import time
import argparse
# Flask app that serves the bundled html5 client from ./static.
app = Flask(__name__, static_folder="static", static_url_path="/static")


@app.route("/")
def homePage():
    """Redirect the site root to the html5 demo client page."""
    return redirect("/static/index.html")
# Command-line options for the html5 server: bind address, port, and the
# TLS certificate/key pair (https is required for browser microphone access).
parser = argparse.ArgumentParser()
_ARG_SPECS = [
    ("--host", str, "0.0.0.0", "host ip, localhost, 0.0.0.0"),
    ("--port", int, 1337, "html5 server port"),
    ("--certfile", str, "./ssl_key/server.crt", "certfile for ssl"),
    ("--keyfile", str, "./ssl_key/server.key", "keyfile for ssl"),
]
for _flag, _type, _default, _help in _ARG_SPECS:
    parser.add_argument(_flag, type=_type, default=_default, required=False, help=_help)
if __name__ == "__main__":
    args = parser.parse_args()
    port = args.port
    # Historical gevent WSGIServer alternative, kept for reference:
    # ssl = {
    #     'certfile': 'server.crt',
    #     'keyfile': 'server.key'
    # }
    # httpsServer = WSGIServer(("0.0.0.0", port), app, **ssl)
    # httpsServer.serve_forever()
    # flask
    print("srv run on ", port)
    app.run(
        debug=False,
        threaded=True,
        host=args.host,
        port=port,
        # Serve over https: browsers only expose the microphone in a secure context.
        ssl_context=(args.certfile, args.keyfile),
    )

View File

@@ -0,0 +1,93 @@
([简体中文](./readme_zh.md)|English)
# Speech Recognition Service Html5 Client Access Interface
The server deployment uses the websocket protocol. The client can support html5 webpage access and microphone input or file input. There are two ways to access the service:
- Method 1:
Directly connect to the html client, manually download the client ([click here](https://github.com/modelscope/FunASR/tree/main/runtime/html5/static)) to the local computer, and open the index.html webpage to enter the wss address and port number.
- Method 2:
Html5 server, automatically download the client to the local computer, and support access by mobile phones and other devices.
## Starting Speech Recognition Service
Support the deployment of Python and C++ versions, where
- Python version
Directly deploy the Python pipeline, support streaming real-time speech recognition models, offline speech recognition models, streaming offline integrated error correction models, and output text with punctuation marks. Single server, supporting a single client.
- C++ version
funasr-runtime-sdk (version 0.1.0) supports one-click deployment and offline file transcription. A single server can handle requests from hundreds of clients.
### Starting Python Version Service
#### Install Dependencies
```shell
pip3 install -U modelscope funasr flask
# Users in mainland China, if encountering network issues, can install with the following command:
# pip3 install -U modelscope funasr -i https://mirror.sjtu.edu.cn/pypi/web/simple
git clone https://github.com/alibaba/FunASR.git && cd FunASR
```
#### Start ASR Service
#### wss Method
```shell
cd funasr/runtime/python/websocket
python funasr_wss_server.py --port 10095
```
For detailed parameter configuration and analysis, please click [here](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/websocket).
#### Html5 Service (Optional)
If you need to use the client method mentioned above to access it, you can start the html5 service
```shell
h5Server.py [-h] [--host HOST] [--port PORT] [--certfile CERTFILE] [--keyfile KEYFILE]
```
As shown in the example below, pay attention to the IP address. If accessing from another device (such as a mobile phone), you need to set the IP address to the real public IP address.
```shell
cd funasr/runtime/html5
python h5Server.py --host 0.0.0.0 --port 1337
```
After starting, enter ([https://127.0.0.1:1337/static/index.html](https://127.0.0.1:1337/static/index.html)) in the browser to access it.
### Starting C++ Version Service
Since there are many dependencies for C++, it is recommended to deploy it using docker, which supports one-key start of the service.
```shell
curl -O https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/shell/funasr-runtime-deploy-offline-cpu-zh.sh;
sudo bash funasr-runtime-deploy-offline-cpu-zh.sh install --workspace /root/funasr-runtime-resources
```
For detailed parameter configuration and analysis, please click [here](https://github.com/alibaba-damo-academy/FunASR/blob/main/funasr/runtime/docs/SDK_tutorial_zh.md).
## Client Testing
### Method 1
Directly connect to the html client, manually download the client ([click here](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/html5/static)) to the local computer, and open the index.html webpage, enter the wss address and port number to use.
### Method 2
Html5 server, automatically download the client to the local computer, and support access by mobile phones and other devices. The IP address needs to be consistent with the html5 server. If it is a local computer, you can use 127.0.0.1.
```shell
https://127.0.0.1:1337/static/index.html
```
Enter the wss address and port number to use.
## Acknowledgements
1. This project is maintained by [FunASR community](https://github.com/alibaba-damo-academy/FunASR).
2. We acknowledge [AiHealthx](http://www.aihealthx.com/) for contributing the html5 demo.

View File

@@ -0,0 +1,93 @@
(简体中文|[English](./readme.md))
# 语音识别服务Html5客户端访问界面
服务端部署采用websocket协议客户端可以支持html5网页访问支持麦克风输入与文件输入可以通过如下2种方式访问
- 方式一:
html客户端直连手动下载客户端[点击此处](https://github.com/modelscope/FunASR/tree/main/runtime/html5/static))至本地,打开`index.html`网页输入wss地址与端口号
- 方式二:
html5服务端自动下载客户端至本地支持手机等端上访问
## 语音识别服务启动
支持python版本与c++版本服务部署,其中
- python版本
直接部署python pipeline支持流式实时语音识别模型离线语音识别模型流式离线一体化纠错模型输出带标点文字。单个server支持单个client。
- c++版本
funasr-runtime-sdk支持一键部署0.1.0版本支持离线文件转写。单个server支持上百路client请求。
### python版本服务启动
#### 安装依赖环境
```shell
pip3 install -U modelscope funasr flask
# 中国大陆用户,如果遇到网络问题,可以通过下面指令安装:
# pip3 install -U modelscope funasr -i https://mirror.sjtu.edu.cn/pypi/web/simple
git clone https://github.com/alibaba/FunASR.git && cd FunASR
```
#### 启动ASR服务
#### wss方式
```shell
cd funasr/runtime/python/websocket
python funasr_wss_server.py --port 10095
```
详细参数配置与解析([点击此处](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/websocket)
#### html5服务可选
如果需要使用上面所说的客户端方式二进行访问可以启动html5服务
```shell
h5Server.py [-h] [--host HOST] [--port PORT] [--certfile CERTFILE] [--keyfile KEYFILE]
```
例子如下需要注意ip地址如果从其他设备访问需求例如手机端需要将ip地址设为真实公网ip
```shell
cd funasr/runtime/html5
python h5Server.py --host 0.0.0.0 --port 1337
```
启动后,在浏览器中输入([https://127.0.0.1:1337/static/index.html](https://127.0.0.1:1337/static/index.html))即可访问
### c++ 版本服务启动
由于c++依赖环境较多建议采用docker部署支持一键启动服务
```shell
curl -O https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/shell/funasr-runtime-deploy-offline-cpu-zh.sh;
sudo bash funasr-runtime-deploy-offline-cpu-zh.sh install --workspace /root/funasr-runtime-resources
```
详细参数配置与解析([点击此处](https://github.com/alibaba-damo-academy/FunASR/blob/main/funasr/runtime/docs/SDK_tutorial_zh.md)
## 客户端测试
### 方式一
html客户端直连手动下载客户端[点击此处](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/html5/static))至本地,打开`index.html`网页输入wss地址与端口号即可使用
### 方式二
html5服务端自动下载客户端至本地支持手机等端上访问ip地址需要与html5 server保持一致如果是本地机器可以用127.0.0.1
```shell
https://127.0.0.1:1337/static/index.html
```
输入wss地址与端口号即可使用
## Acknowledgements
1. This project is maintained by [FunASR community](https://github.com/alibaba-damo-academy/FunASR).
2. We acknowledge [AiHealthx](http://www.aihealthx.com/) for contributing the html5 demo.

View File

@@ -0,0 +1 @@
../ssl_key

View File

@@ -0,0 +1,93 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width,initial-scale=1" />
<title>语音识别</title>
</head>
<body style="margin-left: 3%">
<!-- Recorder library: core plus the wav/pcm encoders used by main.js -->
<script src="recorder-core.js" charset="UTF-8"></script>
<script src="wav.js" charset="UTF-8"></script>
<script src="pcm.js" charset="UTF-8"></script>
<h1>FunASR Demo</h1>
<h3>这里是FunASR开源项目体验demo集成了VAD、ASR与标点等工业级别的模型支持长音频离线文件转写实时语音识别等开源项目地址https://github.com/alibaba-damo-academy/FunASR</h3>
<div class="div_class_topArea">
<div class="div_class_recordControl">
<!-- asr server websocket address; the link below lets the user manually
     accept a self-signed certificate (needed notably on iOS) -->
asr服务器地址(必填):
<br>
<input id="wssip" type="text" onchange="addresschange()" style=" width: 100%;height:100%" value="wss://127.0.0.1:10095/"/>
<br>
<a id="wsslink" href="#" onclick="window.open('https://127.0.0.1:10095/', '_blank')"><div id="info_wslink">点此处手工授权wss://127.0.0.1:10095/</div></a>
<br>
<br>
<!-- input source: microphone capture or local file upload -->
<div style="border:2px solid #ccc;">
选择录音模式:<br/>
<label><input name="recoder_mode" onclick="on_recoder_mode_change()" type="radio" value="mic" checked="true"/>麦克风 </label>&nbsp;&nbsp;
<label><input name="recoder_mode" onclick="on_recoder_mode_change()" type="radio" value="file" />文件 </label>
</div>
<br>
<!-- asr mode radio group (microphone mode only; file mode forces offline) -->
<div id="mic_mode_div" style="border:2px solid #ccc;display:block;">
选择asr模型模式:<br/>
<label><input name="asr_mode" type="radio" value="2pass" checked="true"/>2pass </label>&nbsp;&nbsp;
<label><input name="asr_mode" type="radio" value="online" />online </label>&nbsp;&nbsp;
<label><input name="asr_mode" type="radio" value="offline" />offline </label>
</div>
<div id="rec_mode_div" style="border:2px solid #ccc;display:none;">
<input type="file" id="upfile">
</div>
<br>
<!-- inverse text normalization (ITN) toggle, sent in the websocket handshake -->
<div id="use_itn_div" style="border:2px solid #ccc;display:block;">
逆文本标准化(ITN):<br/>
<label><input name="use_itn" type="radio" value="false" checked="true"/></label>&nbsp;&nbsp;
<label><input name="use_itn" type="radio" value="true" /></label>
</div>
<br>
<!-- hotword list: one "word weight" pair per line, parsed by getHotwords() -->
<div style="border:2px solid #ccc;">
热词设置(一行一个关键字,空格隔开权重,如"阿里巴巴 20")
<br>
<textarea rows="3" id="varHot" style=" width: 100%;height:100%" >阿里巴巴 20&#13;hello world 40</textarea>
<br>
</div>
<!-- recognition output area and connection controls -->
语音识别结果显示:
<br>
<textarea rows="10" id="varArea" readonly="true" style=" width: 100%;height:100%" ></textarea>
<br>
<div id="info_div">请点击开始</div>
<div class="div_class_buttons">
<button id="btnConnect">连接</button>
<button id="btnStart">开始</button>
<button id="btnStop">停止</button>
</div>
<audio id="audio_record" type="audio/wav" controls style="margin-top: 12px; width: 100%;"></audio>
</div>
</div>
<!-- wsconnecter.js talks to the asr server; main.js wires the controls -->
<script src="wsconnecter.js" charset="utf-8"></script>
<script src="main.js" charset="utf-8"></script>
</body>
</html>

View File

@@ -0,0 +1,576 @@
/**
* Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights
* Reserved. MIT License (https://opensource.org/licenses/MIT)
*/
/* 2022-2023 by zhaoming,mali aihealthx.com */
// Connection: websocket connector wired to the message/state handlers below.
var wsconnecter = new WebSocketConnectMethod({msgHandle:getJsonMessage,stateHandle:getConnState});
var audioBlob;
// Recorder producing raw pcm (16-bit, 16 kHz) as expected by the asr server.
var rec = Recorder({
type:"pcm",
bitRate:16,
sampleRate:16000,
onProcess:recProcess
});
// Pending 16 kHz samples not yet sent to the server.
var sampleBuf=new Int16Array();
// Button wiring.
var btnStart = document.getElementById('btnStart');
btnStart.onclick = record;
var btnStop = document.getElementById('btnStop');
btnStop.onclick = stop;
btnStop.disabled = true;
btnStart.disabled = true;
btnConnect= document.getElementById('btnConnect');
btnConnect.onclick = start;
var awsslink= document.getElementById('wsslink');
var rec_text=""; // for online rec asr result
var offline_text=""; // for offline rec asr result
var info_div = document.getElementById('info_div');
var upfile = document.getElementById('upfile');
var isfilemode=false; // if it is in file mode
var file_ext=""; // extension of the uploaded file
var file_sample_rate=16000; //for wav file sample rate
var file_data_array; // array to save file data
var totalsend=0; // running count of bytes sent in file mode
// Derive the default wss endpoint from the page url: https -> wss, page port -> 10095.
var now_ipaddress=window.location.href;
now_ipaddress=now_ipaddress.replace("https://","wss://");
now_ipaddress=now_ipaddress.replace("static/index.html","");
var localport=window.location.port;
now_ipaddress=now_ipaddress.replace(localport,"10095");
document.getElementById('wssip').value=now_ipaddress;
addresschange();
// Re-point the manual-authorization link whenever the wss address field changes.
function addresschange() {
  const inputUri = document.getElementById('wssip').value;
  // iOS Safari needs a manual https visit to accept a self-signed certificate.
  document.getElementById('info_wslink').innerHTML = "点此处手工授权IOS手机";
  // Turn wss:// into https:// so the same endpoint can be opened in a tab.
  const httpsUri = inputUri.replace(/wss/g, "https");
  console.log("addresschange uri=", httpsUri);
  awsslink.onclick = function () {
    window.open(httpsUri, '_blank');
  };
}
// Clicking the file chooser resets the buttons so a fresh connect is
// required before recognition starts.
upfile.onclick = function () {
  btnStart.disabled = true;
  btnStop.disabled = true;
  btnConnect.disabled = false;
};
// from https://github.com/xiangyuecn/Recorder/tree/master
// Parse a wav byte stream: validate the RIFF header, read the format fields
// and locate the data chunk. Returns null for anything that is not raw PCM
// with 1 or 2 channels, or when no data chunk is found.
var readWavInfo = function (bytes) {
  // A valid wav needs at least the canonical 44-byte header.
  if (bytes.byteLength < 44) {
    return null;
  }
  var view = bytes;
  // Compare ASCII string s against the bytes starting at position p.
  var matchAt = function (p, s) {
    for (var k = 0; k < s.length; k++) {
      if (view[p + k] != s.charCodeAt(k)) {
        return false;
      }
    }
    return true;
  };
  if (!matchAt(0, "RIFF") || !matchAt(8, "WAVEfmt ")) {
    return null;
  }
  var channels = view[22];
  // Only uncompressed PCM (format tag 1), mono or stereo, is supported.
  if (view[20] != 1 || (channels != 1 && channels != 2)) {
    return null;
  }
  var sampleRate = view[24] + (view[25] << 8) + (view[26] << 16) + (view[27] << 24);
  var bitsPerSample = view[34] + (view[35] << 8);
  // Keep only the chunks needed to rebuild a minimal 44-byte-style header.
  var headParts = [view.subarray(0, 12)];
  var headSize = 12;
  var dataPos = 0; // offset of the first PCM sample; 44 unless extra chunks exist
  for (var i = 12, end = view.length - 8; i < end; ) {
    if (view[i] == 100 && view[i + 1] == 97 && view[i + 2] == 116 && view[i + 3] == 97) { // "data"
      headParts.push(view.subarray(i, i + 8));
      headSize += 8;
      dataPos = i + 8;
      break;
    }
    var chunkStart = i;
    i += 4;
    // Advance past this chunk: 4-byte size field plus the payload length (LE).
    i += 4 + view[i] + (view[i + 1] << 8) + (view[i + 2] << 16) + (view[i + 3] << 24);
    if (chunkStart == 12) { // the fmt chunk always sits first; keep it verbatim
      headParts.push(view.subarray(chunkStart, i));
      headSize += i - chunkStart;
    }
  }
  if (!dataPos) {
    return null;
  }
  var wavHead = new Uint8Array(headSize);
  for (var j = 0, n = 0; j < headParts.length; j++) {
    wavHead.set(headParts[j], n);
    n += headParts[j].length;
  }
  return {
    sampleRate: sampleRate
    , bitRate: bitsPerSample
    , numChannels: channels
    , wavHead44: wavHead
    , dataPos: dataPos
  };
};
upfile.onchange = function () {
      var len = this.files.length;
for(let i = 0; i < len; i++) {
let fileAudio = new FileReader();
fileAudio.readAsArrayBuffer(this.files[i]);
file_ext=this.files[i].name.split('.').pop().toLowerCase();
var audioblob;
fileAudio.onload = function() {
audioblob = fileAudio.result;
file_data_array=audioblob;
info_div.innerHTML='请点击连接进行识别';
}
          fileAudio.onerror = function(e) {
            console.log('error' + e);
          }
}
// for wav file, we get the sample rate
if(file_ext=="wav")
for(let i = 0; i < len; i++) {
let fileAudio = new FileReader();
fileAudio.readAsArrayBuffer(this.files[i]);
fileAudio.onload = function() {
audioblob = new Uint8Array(fileAudio.result);
// for wav file, we can get the sample rate
var info=readWavInfo(audioblob);
console.log(info);
file_sample_rate=info.sampleRate;
}
      
}
}
// Load the previously read file bytes into the <audio> element so the user
// can replay what was recognized.
function play_file() {
  const blob = new Blob([new Uint8Array(file_data_array)], { type: "audio/wav" });
  const player = document.getElementById('audio_record');
  player.src = (window.URL || webkitURL).createObjectURL(blob);
  player.controls = true;
  // Playback is left to the user; no autoplay.
}
// Stream the loaded file to the server in fixed-size chunks, then signal
// end-of-speech via stop().
function start_file_send() {
  sampleBuf = new Uint8Array(file_data_array);
  const chunkSize = 960; // for asr chunk_size [5, 10, 5]
  while (sampleBuf.length >= chunkSize) {
    sendBuf = sampleBuf.slice(0, chunkSize);
    totalsend = totalsend + sampleBuf.length;
    sampleBuf = sampleBuf.slice(chunkSize, sampleBuf.length);
    wsconnecter.wsSend(sendBuf);
  }
  stop();
}
// Toggle the UI between microphone capture and file-upload mode based on
// the "recoder_mode" radio group.
function on_recoder_mode_change() {
  let mode = null;
  const radios = document.getElementsByName("recoder_mode");
  for (let i = 0; i < radios.length; i++) {
    if (radios[i].checked) {
      mode = radios[i].value;
      break;
    }
  }
  if (mode == "mic") {
    document.getElementById("mic_mode_div").style.display = 'block';
    document.getElementById("rec_mode_div").style.display = 'none';
    btnStart.disabled = true;
    btnStop.disabled = true;
    btnConnect.disabled = false;
    isfilemode = false;
  } else {
    document.getElementById("mic_mode_div").style.display = 'none';
    document.getElementById("rec_mode_div").style.display = 'block';
    btnStart.disabled = true;
    btnStop.disabled = true;
    btnConnect.disabled = true;
    isfilemode = true;
    info_div.innerHTML = '请点击选择文件';
  }
}
// Read the hotword textarea (one "word weight" entry per line, weight is the
// last whitespace-separated token) and serialize the valid entries to a JSON
// string mapping word -> integer weight. Returns null when the box is empty.
function getHotwords() {
  var obj = document.getElementById("varHot");
  if (typeof (obj) == 'undefined' || obj == null || obj.value.length <= 0) {
    return null;
  }
  let val = obj.value.toString();
  console.log("hotwords=" + val);
  let items = val.split(/[(\r\n)\r\n]+/); //split by \r\n
  var jsonresult = {};
  const regexNum = /^[0-9]*$/; // the trailing weight must be numeric
  // `const item` fixes the original's implicit-global loop variable, which
  // throws a ReferenceError in strict-mode (module) contexts.
  for (const item of items) {
    let result = item.split(" ");
    if (result.length >= 2 && regexNum.test(result[result.length - 1])) {
      // Everything before the last token is the (possibly multi-word) hotword.
      var wordstr = "";
      for (var i = 0; i < result.length - 1; i++)
        wordstr = wordstr + result[i] + " ";
      jsonresult[wordstr.trim()] = parseInt(result[result.length - 1], 10); // explicit radix
    }
  }
  console.log("jsonresult=" + JSON.stringify(jsonresult));
  return JSON.stringify(jsonresult);
}
// Determine the asr mode from the "asr_mode" radio group; file mode always
// forces "offline" regardless of the selection.
function getAsrMode() {
  let mode = null;
  const radios = document.getElementsByName("asr_mode");
  for (let i = 0; i < radios.length; i++) {
    if (radios[i].checked) {
      mode = radios[i].value;
      break;
    }
  }
  if (isfilemode) {
    mode = "offline";
  }
  console.log("asr mode" + mode);
  return mode;
}
// Prefix each recognized unit (Chinese characters / English words) with its
// start time in seconds, taken from the server's timestamp array. Falls back
// to the plain text when no usable timestamp string is given.
function handleWithTimestamp(tmptext, tmptime) {
  console.log("tmptext: " + tmptext);
  console.log("tmptime: " + tmptime);
  if (tmptime == null || tmptime == "undefined" || tmptext.length <= 0) {
    return tmptext;
  }
  // Normalize punctuation/spaces to commas so the text splits into units.
  const normalized = tmptext.replace(/。|||、|\?|\.|\ /g, ",");
  const words = normalized.split(",");
  const stamps = JSON.parse(tmptime); //JSON.parse(tmptime.replace(/\]\]\[\[/g, "],[")); for multiple VAD segments
  let charIndex = 0; // cursor into the timestamp array
  let output = "";
  for (let i = 0; i < words.length; i++) {
    if (words[i] == "undefined" || words[i].length <= 0) {
      continue; // skip artifacts of the split
    }
    console.log("words===", words[i]);
    console.log("words: " + words[i] + ",time=" + stamps[charIndex][0] / 1000);
    output = output + stamps[charIndex][0] / 1000 + ":" + words[i] + "\n";
    if (/^[a-zA-Z]+$/.test(words[i])) {
      charIndex = charIndex + 1; // English: roughly one timestamp per word
    } else {
      charIndex = charIndex + words[i].length; // Chinese: one timestamp per char
    }
  }
  return output;
}
// Recognition-result handler: parse the server message and append its text
// to the output textarea; in file mode the final message also triggers
// playback and shuts the socket down.
function getJsonMessage(jsonMsg) {
  //console.log(jsonMsg);
  const data = JSON.parse(jsonMsg.data);
  console.log("message: " + data['text']);
  const rectxt = "" + data['text'];
  const asrmodel = data['mode'];
  const is_final = data['is_final'];
  const timestamp = data['timestamp'];
  if (asrmodel == "2pass-offline" || asrmodel == "offline") {
    // Offline results are authoritative: rebuild the display text from them.
    offline_text = offline_text + handleWithTimestamp(rectxt, timestamp);
    rec_text = offline_text;
  } else {
    rec_text = rec_text + rectxt;
  }
  const varArea = document.getElementById('varArea');
  varArea.value = rec_text;
  console.log("offline_text: " + asrmodel + "," + offline_text);
  console.log("rec_text: " + rec_text);
  if (isfilemode == true && is_final == true) {
    // File mode: the final result arrived — replay the file and close.
    console.log("call stop ws!");
    play_file();
    wsconnecter.wsStop();
    info_div.innerHTML = "请点击连接";
    btnStart.disabled = true;
    btnStop.disabled = true;
    btnConnect.disabled = false;
  }
}
// Connection-state handler: 0 = opened, 1 = closed, 2 = error.
function getConnState(connState) {
  if (connState === 0) { // opened
    info_div.innerHTML = '连接成功!请点击开始';
    if (isfilemode == true) {
      // File mode starts streaming immediately once the socket is up.
      info_div.innerHTML = '请耐心等待,大文件等待时间更长';
      start_file_send();
    } else {
      btnStart.disabled = false;
      btnStop.disabled = true;
      btnConnect.disabled = true;
    }
  } else if (connState === 1) {
    //stop();
  } else if (connState === 2) {
    stop();
    console.log('connecttion error');
    alert("连接地址"+document.getElementById('wssip').value+"失败,请检查asr地址和端口。或试试界面上手动授权再连接。");
    btnStart.disabled = true;
    btnStop.disabled = true;
    btnConnect.disabled = false;
    info_div.innerHTML = '请点击连接';
  }
}
// Open the microphone and begin capturing; samples flow through recProcess.
function record() {
  rec.open(function () {
    rec.start();
    console.log("开始");
    btnStart.disabled = true;
    btnStop.disabled = false;
    btnConnect.disabled = true;
  });
}
// Start a new session: clear previous results and open the websocket.
// Returns 1 when the connection attempt was started, 0 otherwise.
function start() {
  clear(); // wipe the textarea and accumulated transcripts
  console.log("isfilemode" + isfilemode);
  const ret = wsconnecter.wsStart(); // 1 is ok, 0 is error
  if (ret == 1) {
    info_div.innerHTML = "正在连接asr服务器请等待...";
    isRec = true;
    btnStart.disabled = true;
    btnStop.disabled = true;
    btnConnect.disabled = true;
    return 1;
  }
  info_div.innerHTML = "请点击开始";
  btnStart.disabled = true;
  btnStop.disabled = true;
  btnConnect.disabled = false;
  return 0;
}
function stop() {
var chunk_size = new Array( 5, 10, 5 );
var request = {
"chunk_size": chunk_size,
"wav_name": "h5",
"is_speaking": false,
"chunk_interval":10,
"mode":getAsrMode(),
};
console.log(request);
if(sampleBuf.length>0){
wsconnecter.wsSend(sampleBuf);
console.log("sampleBuf.length"+sampleBuf.length);
sampleBuf=new Int16Array();
}
wsconnecter.wsSend( JSON.stringify(request) );
// 控件状态更新
isRec = false;
info_div.innerHTML="发送完数据,请等候,正在识别...";
if(isfilemode==false){
btnStop.disabled = true;
btnStart.disabled = true;
btnConnect.disabled=true;
//wait 3s for asr result
setTimeout(function(){
console.log("call stop ws!");
wsconnecter.wsStop();
btnConnect.disabled=false;
info_div.innerHTML="请点击连接";}, 3000 );
rec.stop(function(blob,duration){
console.log(blob);
var audioBlob = Recorder.pcm2wav(data = {sampleRate:16000, bitRate:16, blob:blob},
function(theblob,duration){
console.log(theblob);
var audio_record = document.getElementById('audio_record');
audio_record.src = (window.URL||webkitURL).createObjectURL(theblob);
audio_record.controls=true;
//audio_record.play();
} ,function(msg){
console.log(msg);
}
);
},function(errMsg){
console.log("errMsg: " + errMsg);
});
}
// 停止连接
}
// Reset the result textarea and both accumulated transcripts.
function clear() {
  document.getElementById('varArea').value = "";
  rec_text = "";
  offline_text = "";
}
// Recorder callback: resample the newest capture buffer to 16 kHz and stream
// it to the server in 960-sample chunks while recording is active.
function recProcess(buffer, powerLevel, bufferDuration, bufferSampleRate, newBufferIdx, asyncEnd) {
  if (isRec === true) {
    const latest = buffer[buffer.length - 1];
    const wrapped = new Array(latest);
    const data_16k = Recorder.SampleData(wrapped, bufferSampleRate, 16000).data;
    sampleBuf = Int16Array.from([...sampleBuf, ...data_16k]);
    const chunkSize = 960; // for asr chunk_size [5, 10, 5]
    info_div.innerHTML = "" + bufferDuration / 1000 + "s";
    while (sampleBuf.length >= chunkSize) {
      sendBuf = sampleBuf.slice(0, chunkSize);
      sampleBuf = sampleBuf.slice(chunkSize, sampleBuf.length);
      wsconnecter.wsSend(sendBuf);
    }
  }
}
// Read the ITN radio group; returns false when nothing is checked.
function getUseITN() {
  const radios = document.getElementsByName("use_itn");
  for (let i = 0; i < radios.length; i++) {
    if (radios[i].checked) {
      return radios[i].value === "true";
    }
  }
  return false;
}

View File

@@ -0,0 +1,96 @@
/*
pcm encoder + encoding engine
https://github.com/xiangyuecn/Recorder
Encoding principle: the pcm emitted here is simply Recorder's raw `buffers`
data after resampling (16-bit output is LE, Little Endian) — no actual
encoding is applied. The code differs little from wav.js: prepending a
44-byte wav header to pcm yields a wav file, so playing pcm back is easy —
convert with the provided helper Recorder.pcm2wav.
*/
(function(){
"use strict";
// Engine metadata: bit depth 8 or 16 goes in bitRate; any sample rate works.
Recorder.prototype.enc_pcm={
stable:true
,testmsg:"pcm为未封装的原始音频数据pcm数据文件无法直接播放支持位数8位、16位填在比特率里面采样率取值无限制"
};
// Encode resampled samples `res` into a pcm Blob handed to the success
// callback True; False is the failure callback (unused here).
Recorder.prototype.pcm=function(res,True,False){
var This=this,set=This.set
,size=res.length
,bitRate=set.bitRate==8?8:16;
var buffer=new ArrayBuffer(size*(bitRate/8));
var data=new DataView(buffer);
var offset=0;
// Write the sample data.
if(bitRate==8) {
for(var i=0;i<size;i++,offset++) {
// 16->8 bit via shift (https://blog.csdn.net/sevennight1989/article/details/85376149);
// clearer than proportional scaling, though both carry audible noise.
var val=(res[i]>>8)+128;
data.setInt8(offset,val,true);
};
}else{
for (var i=0;i<size;i++,offset+=2){
data.setInt16(offset,res[i],true);
};
};
True(new Blob([data.buffer],{type:"audio/pcm"}));
};
/** Convert pcm directly to a playable wav; wav.js must also be loaded.
data: {
sampleRate:16000 sample rate of the pcm data
bitRate:16 bit depth of the pcm data, 8 or 16
blob:blob object
}
If data is a bare blob, a 16-bit 16khz default is assumed (testing only).
True(wavBlob,duration)
False(msg)
**/
Recorder.pcm2wav=function(data,True,False){
if(data.slice && data.type!=null){//Blob, for testing
data={blob:data};
};
var sampleRate=data.sampleRate||16000,bitRate=data.bitRate||16;
if(!data.sampleRate || !data.bitRate){
console.warn("pcm2wav必须提供sampleRate和bitRate");
};
if(!Recorder.prototype.wav){
False("pcm2wav必须先加载wav编码器wav.js");
return;
};
var reader=new FileReader();
reader.onloadend=function(){
var pcm;
if(bitRate==8){
// Expand 8-bit samples back to 16-bit.
var u8arr=new Uint8Array(reader.result);
pcm=new Int16Array(u8arr.length);
for(var j=0;j<u8arr.length;j++){
pcm[j]=(u8arr[j]-128)<<8;
};
}else{
pcm=new Int16Array(reader.result);
};
// Feed the samples through a mock wav-type Recorder to produce the blob.
Recorder({
type:"wav"
,sampleRate:sampleRate
,bitRate:bitRate
}).mock(pcm,sampleRate).stop(function(wavBlob,duration){
True(wavBlob,duration);
},False);
};
reader.readAsArrayBuffer(data.blob);
};
})();

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,86 @@
/*
wav encoder + encoding engine
https://github.com/xiangyuecn/Recorder
mp3 and wav are the best-supported formats; the code favors these two.
Browser support:
https://developer.mozilla.org/en-US/docs/Web/HTML/Supported_media_formats
Encoding principle: prepend a 44-byte wav header to the pcm data to get a wav
file; the pcm data is Recorder's raw `buffers` data after resampling (16-bit
output is LE, Little Endian) — effectively no encoding is applied.
*/
(function(){
"use strict";
// Engine metadata: bit depth 8 or 16 goes in bitRate; any sample rate works.
Recorder.prototype.enc_wav={
stable:true
,testmsg:"支持位数8位、16位填在比特率里面采样率取值无限制"
};
// Encode resampled samples `res` into a mono wav Blob handed to True.
Recorder.prototype.wav=function(res,True,False){
var This=this,set=This.set
,size=res.length
,sampleRate=set.sampleRate
,bitRate=set.bitRate==8?8:16;
// Header layout refs: https://github.com/mattdiamond/Recorderjs https://www.cnblogs.com/blqw/p/3782420.html https://www.cnblogs.com/xiaoqi/p/6993912.html
var dataLength=size*(bitRate/8);
var buffer=new ArrayBuffer(44+dataLength);
var data=new DataView(buffer);
var offset=0;
// Helpers writing little-endian fields at the running offset.
var writeString=function(str){
for (var i=0;i<str.length;i++,offset++) {
data.setUint8(offset,str.charCodeAt(i));
};
};
var write16=function(v){
data.setUint16(offset,v,true);
offset+=2;
};
var write32=function(v){
data.setUint32(offset,v,true);
offset+=4;
};
/* RIFF identifier */
writeString('RIFF');
/* RIFF chunk length */
write32(36+dataLength);
/* RIFF type */
writeString('WAVE');
/* format chunk identifier */
writeString('fmt ');
/* format chunk length */
write32(16);
/* sample format (raw) */
write16(1);
/* channel count */
write16(1);
/* sample rate */
write32(sampleRate);
/* byte rate (sample rate * block align) */
write32(sampleRate*(bitRate/8));// mono (*1 channel)
/* block align (channel count * bytes per sample) */
write16(bitRate/8);// mono (*1 channel)
/* bits per sample */
write16(bitRate);
/* data chunk identifier */
writeString('data');
/* data chunk length */
write32(dataLength);
// Write the sample data.
if(bitRate==8) {
for(var i=0;i<size;i++,offset++) {
// 16->8 bit via shift (https://blog.csdn.net/sevennight1989/article/details/85376149);
// clearer than proportional scaling, though both carry audible noise.
var val=(res[i]>>8)+128;
data.setInt8(offset,val,true);
};
}else{
for (var i=0;i<size;i++,offset+=2){
data.setInt16(offset,res[i],true);
};
};
True(new Blob([data.buffer],{type:"audio/wav"}));
}
})();

View File

@@ -0,0 +1,119 @@
/**
* Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights
* Reserved. MIT License (https://opensource.org/licenses/MIT)
*/
/* 2021-2023 by zhaoming,mali aihealthx.com */
// Websocket connector class: wraps connect/send/close and forwards incoming
// messages and state changes to the configured handlers.
function WebSocketConnectMethod( config ) {
var speechSokt;                       // the WebSocket instance
var connKeeperID;                     // keep-alive timer id (unused)
var msgHandle = config.msgHandle;     // callback for server messages
var stateHandle = config.stateHandle; // callback: 0=open, 1=closed, 2=error
// Open the connection; returns 1 on success, 0 on a bad address or an
// unsupported browser.
this.wsStart = function () {
var Uri = document.getElementById('wssip').value; //"wss://111.205.137.58:5821/wss/" — wss asr online endpoint, e.g. wss://X.X.X.X:port/wss/
if(Uri.match(/wss:\S*|ws:\S*/))
{
console.log("Uri"+Uri);
}
else
{
alert("请检查wss地址正确性");
return 0;
}
if ( 'WebSocket' in window ) {
speechSokt = new WebSocket( Uri ); // create the socket
speechSokt.onopen = function(e){onOpen(e);}; // wire event handlers
speechSokt.onclose = function(e){
console.log("onclose ws!");
//speechSokt.close();
onClose(e);
};
speechSokt.onmessage = function(e){onMessage(e);};
speechSokt.onerror = function(e){onError(e);};
return 1;
}
else {
alert('当前浏览器不支持 WebSocket');
return 0;
}
};
// Close the connection if one exists.
this.wsStop = function () {
if(speechSokt != undefined) {
console.log("stop ws!");
speechSokt.close();
}
};
// Send data only while the socket is OPEN; drop it otherwise.
this.wsSend = function ( oneData ) {
if(speechSokt == undefined) return;
if ( speechSokt.readyState === 1 ) { // 0:CONNECTING, 1:OPEN, 2:CLOSING, 3:CLOSED
speechSokt.send( oneData );
}
};
// Socket event handlers.
function onOpen( e ) {
// Send the initial json handshake describing the stream.
var chunk_size = new Array( 5, 10, 5 );
var request = {
"chunk_size": chunk_size,
"wav_name": "h5",
"is_speaking": true,
"chunk_interval":10,
"itn":getUseITN(),
"mode":getAsrMode(),
};
if(isfilemode)
{
// File mode: also declare the payload format and, for wav, the sample rate
// parsed from the file header.
request.wav_format=file_ext;
if(file_ext=="wav")
{
request.wav_format="PCM";
request.audio_fs=file_sample_rate;
}
}
var hotwords=getHotwords();
if(hotwords!=null )
{
request.hotwords=hotwords;
}
console.log(JSON.stringify(request));
speechSokt.send(JSON.stringify(request));
console.log("连接成功");
stateHandle(0);
}
function onClose( e ) {
stateHandle(1);
}
function onMessage( e ) {
msgHandle( e );
}
function onError( e ) {
info_div.innerHTML="连接"+e;
console.log(e);
stateHandle(2);
}
}