Reading Microphone Audio in Java
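The class below captures sound from the system's default microphone with the Java Sound API (javax.sound.sampled). It opens a TargetDataLine configured for 16 kHz, 16-bit, signed, little-endian, mono PCM, reads the captured bytes into memory for up to 20 seconds, and then writes the result to a WAV file. A commented-out alternative in the capture loop stops recording automatically after a stretch of silence.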
package utils;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.TargetDataLine;

public class MicrophoneSample {

    public static void main(String[] args) {
        MicrophoneSample engineCore = new MicrophoneSample();
        engineCore.startRecognize();
    }

    private AudioFormat audioFormat;

    public MicrophoneSample() {
        float sampleRate = 16000; // 8000, 11025, 16000, 22050, 44100
        int sampleSizeInBits = 16; // 8, 16
        int channels = 1; // 1, 2
        boolean signed = true; // true, false
        boolean bigEndian = false; // true, false
        audioFormat = new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);
    }

    private TargetDataLine targetDataLine;
    private boolean runFlag = true;

    private void startRecognize() {
        try {
            // Ask the system for a capture line that matches the configured audio format
            DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, audioFormat);
            targetDataLine = (TargetDataLine) AudioSystem.getLine(dataLineInfo);
            runFlag = true;
            new CaptureThread().start();
        } catch (Exception e) {
            e.printStackTrace();
        }
    } // end startRecognize

    private void stopRecognize() {
        runFlag = false;
        targetDataLine.stop();
        targetDataLine.close();
    }

    class CaptureThread extends Thread {

        private File audioFile = new File("/home/jade/tmp/voice_cache.wav");
        private int fgmtSize = 1024; // read buffer (fragment) size in bytes
        // private int fgmtTailIdx = fgmtSize - 1; // index of the last byte in the fragment
        private byte[] fragment = new byte[fgmtSize];
        private AudioFileFormat.Type fileType = AudioFileFormat.Type.WAVE;

        public void run() {
            // // Amplitude threshold used to decide whether sound is present
            // int weight = 2;
            // // Counter used to decide when to stop recording
            // int downSum = 0;
            long startTime = System.currentTimeMillis();
            ByteArrayInputStream bais = null;
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            AudioInputStream ais = null;
            try {
                targetDataLine.open(audioFormat);
                targetDataLine.start();
                while (runFlag) {
                    // read() may return fewer bytes than requested; keep only what was actually read
                    int count = targetDataLine.read(fragment, 0, fgmtSize);
                    baos.write(fragment, 0, count);
                    // Hard-coded limit: record at most 20 seconds
                    if ((System.currentTimeMillis() - startTime) > 20_000) {
                        System.out.println("Stopping capture");
                        break;
                    }
                    // Alternative stop condition: stop recording after roughly 200 consecutive silent fragments.
                    // Start storing bytes once the last element exceeds the threshold (sound detected);
                    // once capture has started, the start condition no longer needs to be checked.
                    // if (Math.abs(fragment[fragment.length - 1]) > weight || baos.size() > 0) {
                    //     System.out.println("head: " + fragment[0] + ", tail: "
                    //             + fragment[fgmtTailIdx] + ", length: " + fragment.length);
                    //     // Check whether the voice has stopped
                    //     if (Math.abs(fragment[fgmtTailIdx]) <= weight) {
                    //         downSum++;
                    //     } else {
                    //         System.out.println("Resetting the silence counter");
                    //         downSum = 0;
                    //     }
                    //     // A counter above the threshold (200 here; tune as needed) means no sound has come in for a while
                    //     if (downSum > 200) {
                    //         System.out.println("Stopping capture");
                    //         break;
                    //     }
                    // }
                }
                // Wrap the captured bytes in an AudioInputStream
                byte[] audioData = baos.toByteArray();
                bais = new ByteArrayInputStream(audioData);
                ais = new AudioInputStream(bais, audioFormat, audioData.length / audioFormat.getFrameSize());
                // Write the captured audio to the target WAV file
                System.out.println("Writing the WAV file");
                AudioSystem.write(ais, fileType, audioFile);
                // downSum = 0;
                stopRecognize();
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                // Close the streams; null checks guard against failures before they were created
                try {
                    if (ais != null) {
                        ais.close();
                    }
                    if (bais != null) {
                        bais.close();
                    }
                    baos.reset();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        } // end run
    }
}
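
If you want to confirm that the recording worked, the short sketch below plays the saved file back through a Clip. This is a minimal example, not part of the original sample: the PlaybackCheck class name is made up for illustration, and the file path simply mirrors the one hard-coded in CaptureThread.

import java.io.File;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;

public class PlaybackCheck {
    public static void main(String[] args) throws Exception {
        // Same path the recorder writes to; adjust for your machine
        File audioFile = new File("/home/jade/tmp/voice_cache.wav");
        try (AudioInputStream ais = AudioSystem.getAudioInputStream(audioFile)) {
            Clip clip = AudioSystem.getClip();
            clip.open(ais);
            clip.start();
            // Sleep for the clip's full length so the JVM does not exit mid-playback
            Thread.sleep(clip.getMicrosecondLength() / 1000);
            clip.close();
        }
    }
}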