diff --git a/src/utils/AudioCompare.js b/src/utils/AudioCompare.js
index 317eba82..8549ea09 100644
--- a/src/utils/AudioCompare.js
+++ b/src/utils/AudioCompare.js
@@ -7,9 +7,7 @@ import playButton from "../../src/assets/listen.png";
 import pauseButton from "../../src/assets/pause.png";
 
 const AudioRecorder = (props) => {
-  const [isRecording, setIsRecording] = useState(false);
   const [status, setStatus] = useState("");
-  const [audioBlob, setAudioBlob] = useState(null);
   const recorderRef = useRef(null);
   const mediaStreamRef = useRef(null);
 
@@ -45,22 +43,68 @@ const AudioRecorder = (props) => {
       });
 
       recorderRef.current.startRecording();
-
-      setIsRecording(true);
     } catch (err) {
       console.error("Failed to start recording:", err);
     }
   };
 
-  const stopRecording = () => {
+  const analyzeAudio = async (blob) => {
+    try {
+      const audioContext = new (window.AudioContext ||
+        window.webkitAudioContext)();
+      const arrayBuffer = await blob.arrayBuffer();
+      const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
+
+      const rawData = audioBuffer.getChannelData(0); // Get audio samples from the first channel
+      let total = 0;
+
+      // Sum the absolute values of the audio samples
+      for (let i = 0; i < rawData.length; i++) {
+        total += Math.abs(rawData[i]);
+      }
+
+      const average = total / rawData.length;
+
+      // Threshold for silence detection
+      const silenceThreshold = 0.01;
+
+      if (average < silenceThreshold) {
+        console.log("The audio contains only silence.");
+        props.setOpenMessageDialog({
+          message:
+            "Sorry I couldn't hear a voice. Could you please speak again?",
+          dontShowHeader: true,
+        });
+        return true;
+      } else {
+        console.log("The audio contains sound.");
+        return false;
+      }
+    } catch (error) {
+      console.error("Error analyzing audio:", error);
+      return true;
+    }
+  };
+
+  const stopRecording = async () => {
     setStatus("inactive");
     if (recorderRef.current) {
-      recorderRef.current.stopRecording(() => {
+      recorderRef.current.stopRecording(async () => {
         const blob = recorderRef.current.getBlob();
 
         if (blob) {
-          setAudioBlob(blob);
-          saveBlob(blob); // Persist the blob
+          const isSilent = await analyzeAudio(blob);
+
+          if (!isSilent) {
+            saveBlob(blob);
+            if (props.setEnableNext) {
+              props.setEnableNext(true);
+            }
+          } else {
+            console.log(
+              "The recorded audio is empty or silent. Please try again."
+            );
+          }
         } else {
           console.error("Failed to retrieve audio blob.");
         }
@@ -69,13 +113,8 @@ const AudioRecorder = (props) => {
       if (mediaStreamRef.current) {
         mediaStreamRef.current.getTracks().forEach((track) => track.stop());
       }
-
-      setIsRecording(false);
     });
   }
-  if (props.setEnableNext) {
-    props.setEnableNext(true);
-  }
 };
 
 const saveBlob = (blob) => {
diff --git a/src/utils/VoiceAnalyser.js b/src/utils/VoiceAnalyser.js
index aef73a3f..2acdeada 100644
--- a/src/utils/VoiceAnalyser.js
+++ b/src/utils/VoiceAnalyser.js
@@ -702,7 +702,7 @@ VoiceAnalyser.propTypes = {
   setIsNextButtonCalled: PropTypes.func,
   handleNext: PropTypes.func.isRequired,
   originalText: PropTypes.string,
-  isShowCase: PropTypes.bool.isRequired,
+  isShowCase: PropTypes.bool,
   dontShowListen: PropTypes.bool,
   setEnableNext: PropTypes.func.isRequired,
   showOnlyListen: PropTypes.bool,
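
Note on the silence check for reviewers: `analyzeAudio` decodes the recording with the Web Audio API and treats it as silent when the mean absolute sample amplitude of the first channel falls below a fixed threshold (0.01). The standalone sketch below isolates that logic for reference; the function name `isBlobSilent` and its default parameter are illustrative and not part of this patch.

```js
// Minimal sketch of the silence check used by analyzeAudio above.
// isBlobSilent and the threshold default are illustrative, not from the PR.
async function isBlobSilent(blob, threshold = 0.01) {
  const AudioCtx = window.AudioContext || window.webkitAudioContext;
  const ctx = new AudioCtx();
  try {
    // Decode the recorded Blob into raw PCM samples.
    const audioBuffer = await ctx.decodeAudioData(await blob.arrayBuffer());
    const samples = audioBuffer.getChannelData(0); // first channel only
    let total = 0;
    for (let i = 0; i < samples.length; i++) {
      total += Math.abs(samples[i]);
    }
    // Mean absolute amplitude below the threshold counts as silence.
    return total / samples.length < threshold;
  } finally {
    ctx.close(); // release the audio context's resources
  }
}
```

Usage would look like `if (await isBlobSilent(blob)) { /* prompt the user to retry */ }`. One possible follow-up: a fixed 0.01 threshold is sensitive to microphone gain, so an RMS-based measure or a configurable threshold may prove more robust across devices.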