Electron Audio and Video Notes

Enumerating Devices

let devices = await navigator
  .mediaDevices
  .enumerateDevices();

Each entry of the returned array has the following shape:

{
  deviceId: "default",
  groupId: "052c3cdc7b40ad499d3378c99f07e928988364dde49df6264ec9833620c63c92",
  kind: "audioinput",
  label: "Default - 麦克风 (HECATE G30 GAMING HEADSET) (2d99:0026)"
}

The kind field takes one of the following values:

  • videoinput — video input (camera)
  • audioinput — audio input (microphone)
  • audiooutput — audio output (speaker)

The deviceId field identifies the device and takes one of the following values:

  • default — the default device (at most one entry)
  • communications — the device used for communications (at most one entry)
  • a concrete device id — such an entry can duplicate the default/communications entries above

The groupId field marks entries that belong to the same physical device.

For example, my headset can both play sound and record, so the audio-input and audio-output entries it produces share the same groupId.
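A quick way to see this pairing is to group the enumerated entries by groupId; a minimal sketch (run inside an async function):

let devices = await navigator.mediaDevices.enumerateDevices();
let byGroup = new Map();
for (let device of devices) {
  if (!byGroup.has(device.groupId)) {
    byGroup.set(device.groupId, []);
  }
  byGroup.get(device.groupId).push(device);
}
// Each value now holds every entry (e.g. audioinput + audiooutput) of one physical device
for (let [groupId, group] of byGroup) {
  console.info(groupId, group.map((d) => d.kind));
}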

The label field is the human-readable device name.

Note that for the default and communications entries, the name is prefixed with Default or Communications plus a " - " separator.
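Besides skipping the default/communications entries (as the snippet below does), the prefix can also be stripped directly; a small sketch with a hypothetical helper:

// Hypothetical helper: recover the raw device name from a default/communications entry
function rawLabel(device) {
  if (device.deviceId === "default" || device.deviceId === "communications") {
    return device.label.replace(/^(Default|Communications) - /, "");
  }
  return device.label;
}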

Getting the plain device names:

let devices = await navigator.mediaDevices
  .enumerateDevices();
for (let device of devices) {
  if (
    device.deviceId !== "default"
    && device.deviceId !== "communications"
  ) {
    let label = device.label;
    console.info(label);
  }
}

Cameras

let devices = await navigator.mediaDevices
  .enumerateDevices()
  .then((devices) => devices.filter((d) => d.kind === "videoinput"));

Microphones

let devices = await navigator.mediaDevices
  .enumerateDevices()
  .then((devices) => devices.filter((d) => d.kind === "audioinput"));

Getting an Audio/Video Stream

Basic syntax

navigator
  .mediaDevices
  .getUserMedia(constraints)
  .then(function (stream) {
    /* use the stream */
  })
  .catch(function (err) {
    /* handle the error */
  });

The constraints parameter is a MediaStreamConstraints object with video and audio members that describe the media types being requested. At least one of the two must be specified. If the browser cannot find the requested media type, or cannot satisfy the corresponding constraints, the returned Promise is rejected, with NotFoundError passed to the rejection callback.
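A minimal sketch of the same call in async/await form, handling the usual rejection names (NotFoundError for no matching device, NotAllowedError for denied permission):

try {
  let stream = await navigator.mediaDevices.getUserMedia({ audio: true, video: true });
  // use the stream
} catch (err) {
  if (err.name === "NotFoundError") {
    console.info("no matching device found");
  } else if (err.name === "NotAllowedError") {
    console.info("the user or a policy denied access");
  } else {
    console.info(err);
  }
}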

The constraints object can take the following forms.

Request both audio and video with no further constraints:

{
  audio: true,
  video: true
}

When, for privacy reasons, the user's camera and microphone details are not directly accessible, an application can use additional constraints to request the capabilities it needs or prefers. The following requests a camera resolution of 1280x720:

{
  audio: true,
  video: { width: 1280, height: 720 }
}

Matching the best camera, or ideal values: when a request includes an ideal value, that value carries greater weight, meaning the browser first tries to find the setting, or the camera (if the device has more than one), closest to the specified ideal value.

{
  audio: true,
  video: {
    width: { min: 1024, ideal: 1280, max: 1920 },
    height: { min: 776, ideal: 720, max: 1080 }
  }
}

Not all constraints are numbers. On mobile devices, for example, the following prefers the front camera (if one is available):

{
  audio: true,
  video: { facingMode: "user" }
}

To require the rear camera, use:

{
  audio: true,
  video: {
    facingMode: { exact: "environment" }
  }
}

In some cases, such as WebRTC over a bandwidth-constrained link, a lower frame rate may be preferable:

{
  video: {
    frameRate: { ideal: 10, max: 15 }
  }
}

Pinning a specific camera

{
  video: {
    deviceId: video.deviceId,
    groupId: video.groupId,
  }
}
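Note that a bare deviceId like this is treated as an ideal value, so the browser may still fall back to another camera. For strict selection, exact can be used; the trade-off is that an unmatched id rejects with OverconstrainedError:

{
  video: {
    deviceId: { exact: video.deviceId }
  }
}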

Pinning a specific microphone

{
  audio: {
    deviceId: mic.deviceId, // mic is an audioinput entry from enumerateDevices()
    groupId: mic.groupId,
  }
}

Stopping the camera

if (window.mystream) {
  window.mystream.getTracks().forEach(track => {
    track.stop();
  });
}
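Stopping the tracks does not blank a video element that is still holding the stream; a small sketch that also detaches it (the ref name follows the camera_video element used elsewhere in these notes):

if (window.mystream) {
  window.mystream.getTracks().forEach(track => {
    track.stop();
  });
  window.mystream = null;
}
// Release the last frame shown by the <video> element
let videoEl = this.$refs["camera_video"];
if (videoEl) {
  videoEl.srcObject = null;
}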

Getting the camera stream

let that = this; // keep a reference for the callback below
let device_index = this.device_index;
let devices = await navigator.mediaDevices
  .enumerateDevices()
  .then((devices) => devices.filter((d) => d.kind === "videoinput"));
let video = devices[device_index];
navigator.mediaDevices
  .getUserMedia({ video })
  .then(function (localStream) {
    window.mystream = localStream;
    that.$refs["camera_video"].srcObject = localStream;
  });

Switching cameras

async change_camera_click() {
  let that = this;
  if (window.mystream) {
    window.mystream.getTracks().forEach(track => {
      track.stop();
    });
  }
  let device_index = this.device_index;
  let devices = await navigator.mediaDevices
    .enumerateDevices()
    .then((devices) => devices.filter((d) => d.kind === "videoinput"));

  if (devices.length > 1) {
    if (device_index === 0) {
      device_index = devices.length - 1;
    } else {
      device_index = 0;
    }
    that.device_index = device_index;
  } else if (devices.length === 1) {
    that.device_index = 0;
    device_index = 0;
  }
  this.devices = devices;
  if (devices.length > 0) {
    let video = devices[device_index];
    navigator.mediaDevices
      .getUserMedia({
        video: {
          deviceId: video.deviceId,
          groupId: video.groupId,
        }
      })
      .then(function (localStream) {
        window.mystream = localStream;
        that.$refs["camera_video"].srcObject = localStream;
        that.show_camera_div = true;
      })
      .catch(function (e) {
        console.info(e);
      });
  } else {
    that.show_camera_div = false;
  }
},

Getting the desktop stream

let stream = await navigator.mediaDevices.getUserMedia({
  audio: false,
  video: {
    mandatory: {
      chromeMediaSource: "desktop",
      maxWidth: window.screen.width,
      maxHeight: window.screen.height,
    },
  },
});

Getting the primary display's stream

const { desktopCapturer } = window.require("electron");
const { screen } = window.require("electron").remote;

let display = screen.getPrimaryDisplay();
let scaleFactor = display.scaleFactor;
let sources = await desktopCapturer.getSources({ types: ['screen'] });
let source = sources[0];
navigator.getUserMedia(
  {
    audio: false,
    video: {
      mandatory: {
        chromeMediaSource: "desktop",
        chromeMediaSourceId: source.id,
        maxWidth: display.bounds.width * scaleFactor,
        maxHeight: display.bounds.height * scaleFactor,
      },
    },
  },
  handleStream,
  handleError
)
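navigator.getUserMedia is the deprecated callback form; the promise-based navigator.mediaDevices.getUserMedia accepts the same mandatory constraints (a sketch, matching the window-capture example in the next section):

let stream = await navigator.mediaDevices.getUserMedia({
  audio: false,
  video: {
    mandatory: {
      chromeMediaSource: "desktop",
      chromeMediaSourceId: source.id,
      maxWidth: display.bounds.width * scaleFactor,
      maxHeight: display.bounds.height * scaleFactor,
    },
  },
});
handleStream(stream);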

Electron: getting a window's stream

The following example shows how to capture video from a desktop window whose title is Electron:

// In the renderer process.
const { desktopCapturer } = require('electron')

desktopCapturer.getSources({ types: ['window', 'screen'] }).then(async sources => {
  for (const source of sources) {
    if (source.name === 'Electron') {
      try {
        const stream = await navigator.mediaDevices.getUserMedia({
          audio: false,
          video: {
            mandatory: {
              chromeMediaSource: 'desktop',
              chromeMediaSourceId: source.id,
              minWidth: 1280,
              maxWidth: 1280,
              minHeight: 720,
              maxHeight: 720
            }
          }
        })
        handleStream(stream)
      } catch (e) {
        handleError(e)
      }
      return
    }
  }
})

function handleStream (stream) {
  const video = document.querySelector('video')
  video.srcObject = stream
  video.onloadedmetadata = (e) => video.play()
}

function handleError (e) {
  console.log(e)
}

Electron: taking a screenshot

const { desktopCapturer } = window.require("electron");
const { screen } = window.require("electron").remote;

jieping1() {
  let display = screen.getPrimaryDisplay();
  let scaleFactor = display.scaleFactor;

  desktopCapturer
    .getSources({
      types: ["screen"],
      thumbnailSize: {
        width: display.bounds.width * scaleFactor,
        height: display.bounds.height * scaleFactor,
      },
    })
    .then(async (sources) => {
      let selectSource = sources[0];
      let imageurl = selectSource.thumbnail.toDataURL("image/png");
      window.myevent.$emit("jiangping_image", imageurl);
    });
},

desktopCapturer.getSources hangs the whole app; how long depends on screen resolution, number of displays, and machine performance. On my MacBook Pro with an external 2K display, a screenshot can stall for more than 2 seconds, and the cursor even switches to the wait state, which is a pretty poor experience.

Electron screenshots, approach two

This approach reduces the time needed to produce the screenshot.

jieping2() {
  document.body.style.cursor = "text";
  let that = this;
  let display = screen.getPrimaryDisplay();
  let scaleFactor = display.scaleFactor;
  const handleStream = (stream) => {
    let video = document.createElement('video');
    video.autoplay = true;
    video.style.cssText = 'position:absolute;top:-100000px;left:-100000px;';

    let loaded = false;
    video.onloadedmetadata = () => {
      if (loaded) {
        return;
      }
      loaded = true;
      console.info("video.videoHeight", video.videoHeight);
      console.info("video.videoWidth", video.videoWidth);
      // Size the video to its original dimensions (for the screenshot)
      video.style.height = video.videoHeight + 'px';
      video.style.width = video.videoWidth + 'px';

      // Create a canvas matching the video
      let canvas = document.createElement('canvas');
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
      let ctx = canvas.getContext('2d');
      // Draw the current video frame onto the canvas
      ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
      video.remove();
      document.body.style.cursor = "default";
      try {
        stream.getTracks()[0].stop();
      } catch (e) {
      }
      let imageurl = canvas.toDataURL('image/png');
      window.myevent.$emit("jiangping_image", imageurl);
      that.show_jiangping();
    };
    video.srcObject = stream;
    document.body.appendChild(video);
  };

  const handleError = (error) => {
    console.info(error);
    console.info("Screenshot failed!");
  };
  console.info("window.screen.width", window.screen.width);
  navigator.getUserMedia(
    {
      audio: false,
      video: {
        mandatory: {
          chromeMediaSource: "desktop",
          maxWidth: display.bounds.width * scaleFactor,
          maxHeight: display.bounds.height * scaleFactor,
        },
      },
    },
    handleStream,
    handleError
  );
},

Microphone

Enumerating microphone devices

let devices = await navigator.mediaDevices
  .enumerateDevices()
  .then((devices) => devices.filter((d) => d.kind === "audioinput"));

Getting the audio tracks

let voiceStream = await navigator
  .mediaDevices
  .getUserMedia(
    { video: false, audio: true }
  );
let audioTracks = voiceStream.getAudioTracks();
console.info(audioTracks);
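Each track also exposes the settings that were actually negotiated, which is a quick way to check which device and format were chosen; a minimal sketch:

let settings = audioTracks[0].getSettings();
// deviceId, sampleRate and channelCount are standard MediaTrackSettings fields
console.info(settings.deviceId, settings.sampleRate, settings.channelCount);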

Playing the sound

For testing only: recording and playing back on the same device causes feedback (howling).

let video = document.createElement('video');
video.autoplay = true;
video.style.cssText = 'position:absolute;top:-100000px;left:-100000px;';
video.srcObject = voiceStream;
document.body.appendChild(video);

Visualizing the microphone level

async showAudio() {
  let audioStream = await navigator
    .mediaDevices
    .getUserMedia(
      { video: false, audio: true }
    );

  let AudioContext = window.AudioContext || window.webkitAudioContext, // compatibility
    context = new AudioContext(), // create the audio context
    media = context.createMediaStreamSource(audioStream), // media node from the stream
    processor = context.createScriptProcessor(
      0, // buffer size; 0 lets the browser choose
      1, // input channels
      1  // output channels
    );

  /*
    media → processor → destination
  */
  media.connect(processor);
  processor.connect(context.destination);

  // Canvas drawing
  let canvas = document.createElement('canvas');
  canvas.style.cssText = 'position:absolute;top:0px;left:0px;width:200px;height:100px;background:#ffffff;';
  document.body.appendChild(canvas);
  let width = canvas.width,
    height = canvas.height,
    g = canvas.getContext("2d");
  g.translate(0, height / 2);

  // Processing callback, invoked for each audio buffer
  processor.onaudioprocess = function (e) {
    // Get the input data buffer
    let input = e.inputBuffer.getChannelData(0);
    // Draw the buffer onto the canvas
    g.clearRect(0, -height / 2, width, height);
    g.beginPath();
    for (let i = 0; i < width; i++) {
      g.lineTo(i, height / 2 * input[input.length * i / width | 0]);
    }
    g.stroke();
    // let output = e.outputBuffer.getChannelData(0);
    // Copy the input buffer to the output buffer (uncomment to hear the microphone)
    // for (let i = 0; i < input.length; i++) {
    //   output[i] = input[i];
    // }
  };
},

Visualizing the microphone level with per-channel gain control

async showAudio() {
  let audioStream = await navigator
    .mediaDevices
    .getUserMedia(
      { video: false, audio: true }
    );

  let AudioContext = window.AudioContext || window.webkitAudioContext, // compatibility
    context = new AudioContext(), // create the audio context
    media = context.createMediaStreamSource(audioStream), // media node from the stream
    lGain = context.createGain(), // left channel
    rGain = context.createGain(), // right channel
    splitter = context.createChannelSplitter(2), // splitter
    merger = context.createChannelMerger(2), // merger
    processor = context.createScriptProcessor(
      0,
      1,
      1
    );

  /*
                     → lGain →
    media → splitter           merger → processor → destination
                     → rGain →
  */
  lGain.gain.value = 1;
  rGain.gain.value = 1;
  media.connect(splitter);
  splitter.connect(lGain, 0);
  splitter.connect(rGain, 1);
  lGain.connect(merger, 0, 0);
  rGain.connect(merger, 0, 1);
  merger.connect(processor);
  processor.connect(context.destination);

  // Canvas drawing
  let canvas = document.createElement('canvas');
  canvas.style.cssText = 'position:absolute;top:0px;left:0px;width:200px;height:100px;background:#ffffff;';
  document.body.appendChild(canvas);
  let width = canvas.width,
    height = canvas.height,
    g = canvas.getContext("2d");
  g.translate(0, height / 2);

  // Processing callback, invoked for each audio buffer
  processor.onaudioprocess = function (e) {
    // Get the input data buffer
    let input = e.inputBuffer.getChannelData(0);
    // Draw the buffer onto the canvas
    g.clearRect(0, -height / 2, width, height);
    g.beginPath();
    for (let i = 0; i < width; i++) {
      g.lineTo(i, height / 2 * input[input.length * i / width | 0]);
    }
    g.stroke();
    // let output = e.outputBuffer.getChannelData(0);
    // Copy the input buffer to the output buffer (uncomment to hear the microphone)
    // for (let i = 0; i < input.length; i++) {
    //   output[i] = input[i];
    // }
  };
},

If the buffer is shorter than the canvas width in pixels, it is better to iterate over the buffer length instead:

g.clearRect(0, -height / 2, width, height);
g.beginPath();
console.info("input.length", input.length);
for (let i = 0; i < input.length; i++) {
  let x = width / input.length * i;
  g.lineTo(x, height / 2 * input[i | 0]);
}
g.stroke();

Note

Uncommenting the copy loop above makes the microphone audible through the speakers.

Loading a device's audio stream

context.createMediaStreamSource(audioStream)

To load a local audio file instead:

let audio = new Audio('茜拉 - 想你的夜.mp3');
let media = context.createMediaElementSource(audio); // create a media node from the element
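A media-element node is silent until it is connected to the graph and the element is playing; a minimal sketch reusing the context from above:

media.connect(context.destination); // or route through lGain/rGain as in the earlier graph
audio.play();                       // the element drives the audio graph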

Setting the channel levels

let vol = 100,  // master volume
  lVol = 100,   // left channel
  rVol = 100;   // right channel
// Channel control
function setVolumeLR() {
  lVol = 70;
  rVol = 80;
  setVolume();
}

Setting the volume

// Set the volume (lGain/rGain are the gain nodes created above)
function setVolume() {
  lGain.gain.value = lVol / 100 * vol / 100;
  rGain.gain.value = rVol / 100 * vol / 100;
}
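A hypothetical usage, driving vol from a range input (the element id is an assumption):

// <input type="range" id="volume" min="0" max="100" value="100">
document.querySelector("#volume").oninput = function () {
  vol = Number(this.value); // hypothetical slider wiring
  setVolume();
};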

Merging audio and video

let stream = await navigator.mediaDevices.getUserMedia({
  audio: false,
  video: {
    mandatory: {
      chromeMediaSource: "desktop",
      maxWidth: window.screen.width,
      maxHeight: window.screen.height,
    },
  },
});
let audioStream = await navigator.mediaDevices.getUserMedia({
  audio: true,
  video: false,
});
// Move the microphone track into the desktop stream
let audioTrack = audioStream.getAudioTracks()[0];
stream.addTrack(audioTrack);
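A common use of the merged stream is recording it; a minimal sketch with the standard MediaRecorder API (the webm mime type is an assumption, check MediaRecorder.isTypeSupported first):

let chunks = [];
let recorder = new MediaRecorder(stream, { mimeType: "video/webm" });
recorder.ondataavailable = (e) => chunks.push(e.data);
recorder.onstop = () => {
  let blob = new Blob(chunks, { type: "video/webm" });
  // e.g. URL.createObjectURL(blob) to preview or download the recording
};
recorder.start();
// later: recorder.stop();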