+ {/* Download List Button */}
+
+ {/* Share Via Email */}
+
+
+
+
+
+ );
+}
+
+export default ShoppingList;
diff --git a/src/utils/speechRecognition.js b/src/utils/speechRecognition.js
new file mode 100644
index 00000000..abb7ac15
--- /dev/null
+++ b/src/utils/speechRecognition.js
@@ -0,0 +1,81 @@
+// Vendor-prefixed fallbacks: Chrome/Safari expose the Web Speech API as
+// webkitSpeechRecognition / webkitSpeechGrammarList. NOTE(review): either
+// binding may be undefined in browsers without the API (e.g. Firefox).
+const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+const SpeechGrammarList = window.SpeechGrammarList || window.webkitSpeechGrammarList;
+
+export const initializeSpeech = () => {
+ const recognition = new SpeechRecognition();
+ const speechRecognitionList = new SpeechGrammarList();
+ recognition.grammars = speechRecognitionList;
+ recognition.lang = 'en-US';
+ recognition.interimResults = false;
+ recognition.maxAlternatives = 1;
+ return recognition;
+}
+
+export const startRecognition = (recognition, onStart, onResult, onEnd) => {
+
+ // To ensure case consistency while checking with the returned output text
+ recognition.start();
+ onStart();
+
+ recognition.onresult = function(event) {
+ // The SpeechRecognitionEvent results property returns a SpeechRecognitionResultList object
+ // The SpeechRecognitionResultList object contains SpeechRecognitionResult objects.
+ // It has a getter so it can be accessed like an array
+ // The first [0] returns the SpeechRecognitionResult at position 0.
+ // Each SpeechRecognitionResult object contains SpeechRecognitionAlternative objects that contain individual results.
+ // These also have getters so they can be accessed like arrays.
+ // The second [0] returns the SpeechRecognitionAlternative at position 0.
+ // We then return the transcript property of the SpeechRecognitionAlternative object
+ const speechResult = event.results[0][0].transcript.toLowerCase();
+ onResult(speechResult);
+ console.log('Confidence: ' + event.results[0][0].confidence);
+ }
+
+ recognition.onspeechend = function() {
+ recognition.stop();
+ onEnd();
+ }
+
+ recognition.onerror = function(event) {
+ console.log('Error occurred in recognition: ' + event.error);
+ }
+
+ recognition.onaudiostart = function(event) {
+ //Fired when the user agent has started to capture audio.
+ console.log('Listening');
+ }
+
+ recognition.onaudioend = function(event) {
+ //Fired when the user agent has finished capturing audio.
+ console.log('Done listening');
+ }
+
+ recognition.onend = function(event) {
+ //Fired when the speech recognition service has disconnected.
+ console.log('SpeechRecognition.onend');
+ }
+
+ recognition.onnomatch = function(event) {
+ //Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
+ console.log('Please try again.');
+ }
+
+ recognition.onsoundstart = function(event) {
+ //Fired when any sound — recognisable speech or not — has been detected.
+ console.log('SpeechRecognition.onsoundstart');
+ }
+
+ recognition.onsoundend = function(event) {
+ //Fired when any sound — recognisable speech or not — has stopped being detected.
+ console.log('SpeechRecognition.onsoundend');
+ }
+
+ recognition.onspeechstart = function (event) {
+ //Fired when sound that is recognised by the speech recognition service as speech has been detected.
+ console.log('SpeechRecognition.onspeechstart');
+ }
+ recognition.onstart = function(event) {
+ //Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.
+ console.log('SpeechRecognition.onstart');
+ }
+}
\ No newline at end of file
diff --git a/src/utils/utils.js b/src/utils/utils.js
new file mode 100644
index 00000000..79ce59a6
--- /dev/null
+++ b/src/utils/utils.js
@@ -0,0 +1,4 @@
+// save data to local storage
+export const saveToLocalStorage = (key, value) => {
+ localStorage.setItem(key, JSON.stringify(value));
+};
\ No newline at end of file