I'm trying to create a Next.js app that uses the MediaRecorder API to record the screen, the system/screen audio, and the user's microphone, but I have a problem with the audio: the user's voice isn't recorded clearly, and sometimes isn't recorded at all. My code is below — I hope someone can help. Thank you!
'use client'
import React, { useState, useRef, useCallback, useEffect } from 'react';
/**
 * Records the screen (plus system audio when granted) together with the
 * user's microphone, and plays the finished recording back in a <video>.
 *
 * MediaRecorder only records the FIRST audio track of a stream, so the
 * microphone and the system audio are mixed into a single track with the
 * Web Audio API before recording.
 */
const ScreenRecorder: React.FC = () => {
  // Whether a recording is currently in progress (drives which button shows).
  const [recording, setRecording] = useState<boolean>(false);
  // Object URL of the finished recording, fed to the <video> element.
  const [mediaUrl, setMediaUrl] = useState<string>();
  const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | null>(null);
  // Microphone stream (uStream) and display/screen stream (mStream), kept in
  // state so their tracks can be stopped when the user ends the recording.
  const [uStream, setUStream] = useState<MediaStream>();
  const [mStream, setMStream] = useState<MediaStream>();

  const startRecording = useCallback(async () => {
    const mediaParts: Blob[] = [];
    try {
      // Screen capture; audio: true requests system/tab audio, but the
      // browser or the user may still grant a video-only stream.
      const stream = await navigator.mediaDevices.getDisplayMedia({
        video: true,
        audio: true,
      });
      setMStream(stream);

      const userStream = await navigator.mediaDevices.getUserMedia({
        audio: true,
      });
      setUStream(userStream);

      // Mix microphone + (optional) system audio into ONE track.
      const audioContext = new window.AudioContext();
      const destination = audioContext.createMediaStreamDestination();
      audioContext.createMediaStreamSource(userStream).connect(destination);
      // createMediaStreamSource throws if the stream has no audio track, so
      // only connect system audio when it was actually granted. This is a
      // likely cause of "sometimes the mic isn't recorded": the original
      // code threw here and aborted the whole start sequence.
      if (stream.getAudioTracks().length > 0) {
        audioContext.createMediaStreamSource(stream).connect(destination);
      }

      const combinedStream = new MediaStream([
        ...destination.stream.getAudioTracks(),
        ...stream.getVideoTracks(),
      ]);

      const recorder = new MediaRecorder(combinedStream);
      setMediaRecorder(recorder);

      // Register handlers BEFORE start() so no events can be missed.
      recorder.ondataavailable = (e) => {
        if (e.data && e.data.size > 0) {
          mediaParts.push(e.data);
        }
      };
      recorder.onstop = () => {
        // MediaRecorder produces WebM by default in Chrome/Firefox;
        // labelling the blob 'video/mp4' confuses playback. Do NOT call
        // recorder.stop() in here — the recorder is already inactive and
        // stopping again throws an InvalidStateError.
        const blob = new Blob(mediaParts, { type: 'video/webm' });
        setMediaUrl(URL.createObjectURL(blob));
        void audioContext.close(); // release audio-processing resources
      };

      recorder.start();
      setRecording(true);
    } catch (error) {
      console.error('Error starting recording:', error);
    }
  }, []);

  const stopRecording = useCallback(() => {
    if (mediaRecorder) {
      mediaRecorder.stop();
      // Stop every track so the browser's capture indicators go away.
      uStream?.getTracks().forEach((track) => track.stop());
      mStream?.getTracks().forEach((track) => track.stop());
      setRecording(false);
    }
  }, [mediaRecorder, uStream, mStream]);

  return (
    <div>
      <h1>State: {mediaRecorder?.state}</h1>
      {!recording && (
        <button onClick={startRecording}>Start Recording</button>
      )}
      {recording && (
        <button onClick={stopRecording}>Stop Recording</button>
      )}
      {mediaUrl && <video src={mediaUrl} controls autoPlay />}
    </div>
  );
};
export default ScreenRecorder;
The first time around I didn't use an AudioContext and just combined the tracks directly, but then I only got the screen video and the user's voice (the screen audio was missing) — apparently because MediaRecorder only records a single audio track, so the two audio sources need to be mixed into one.
We are glad to have you as a member of our group! A common problem when integrating the MediaRecorder API into a Next.js application is that it cannot capture the user's microphone and the screen's system audio at the same time: MediaRecorder records only a single audio track, so the two sources have to be mixed (for example with the Web Audio API). Support for capturing system audio also varies by browser, and you should always verify that the necessary permissions were granted.
Here's how you can do it:
'use client'
import React, { useState, useCallback } from 'react';
/**
 * Records the screen (plus system audio when granted) together with the
 * user's microphone. The two audio sources are mixed into a single track
 * with the Web Audio API, because MediaRecorder only records the first
 * audio track of the stream it is given.
 */
const ScreenRecorder: React.FC = () => {
  // Whether a recording is currently in progress (drives which button shows).
  const [recording, setRecording] = useState<boolean>(false);
  // Object URL of the finished recording, fed to the <video> element.
  const [mediaUrl, setMediaUrl] = useState<string>();
  const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | null>(null);
  // Microphone stream (uStream) and display/screen stream (mStream), kept in
  // state so their tracks can be stopped when the user ends the recording.
  const [uStream, setUStream] = useState<MediaStream | null>(null);
  const [mStream, setMStream] = useState<MediaStream | null>(null);

  const startRecording = useCallback(async () => {
    const mediaParts: Blob[] = [];
    try {
      // Screen capture; audio: true requests system/tab audio, but the
      // browser or the user may still grant a video-only stream.
      const displayStream = await navigator.mediaDevices.getDisplayMedia({
        video: true,
        audio: true,
      });
      setMStream(displayStream);

      const userStream = await navigator.mediaDevices.getUserMedia({
        audio: true,
      });
      setUStream(userStream);

      // Create the AudioContext HERE, inside the click handler, not in a
      // useState initializer: during SSR of a Next.js client component
      // `window` is undefined (render-time construction crashes), and a
      // context created outside a user gesture starts suspended, which can
      // silently produce no mixed audio at all.
      const audioContext = new window.AudioContext();
      const destination = audioContext.createMediaStreamDestination();
      audioContext.createMediaStreamSource(userStream).connect(destination);
      // createMediaStreamSource throws when the stream has no audio track,
      // so only connect system audio when it was actually granted.
      if (displayStream.getAudioTracks().length > 0) {
        audioContext.createMediaStreamSource(displayStream).connect(destination);
      }

      const combinedStream = new MediaStream([
        ...destination.stream.getAudioTracks(),
        ...displayStream.getVideoTracks(),
      ]);

      const recorder = new MediaRecorder(combinedStream);
      setMediaRecorder(recorder);

      // Register handlers BEFORE start() so no events can be missed.
      recorder.ondataavailable = (event) => {
        if (event.data && event.data.size > 0) {
          mediaParts.push(event.data);
        }
      };
      recorder.onstop = () => {
        // MediaRecorder emits WebM by default in Chrome/Firefox; label the
        // blob accordingly rather than 'video/mp4'.
        const blob = new Blob(mediaParts, { type: 'video/webm' });
        setMediaUrl(URL.createObjectURL(blob));
        void audioContext.close(); // release audio-processing resources
      };

      recorder.start();
      setRecording(true);
    } catch (error) {
      console.error('Error starting recording:', error);
    }
  }, []);

  const stopRecording = useCallback(() => {
    if (mediaRecorder) {
      mediaRecorder.stop();
      // Stop every track so the browser's capture indicators go away.
      uStream?.getTracks().forEach((track) => track.stop());
      mStream?.getTracks().forEach((track) => track.stop());
      setRecording(false);
    }
  }, [mediaRecorder, uStream, mStream]);

  return (
    <div>
      <h1>State: {mediaRecorder?.state}</h1>
      {!recording && (
        <button onClick={startRecording}>Start Recording</button>
      )}
      {recording && (
        <button onClick={stopRecording}>Stop Recording</button>
      )}
      {mediaUrl && (
        <video src={mediaUrl} controls autoPlay />
      )}
    </div>
  );
};
export default ScreenRecorder;
Feel free to reach out if you have any more questions or need further help. Good luck with your project!