I'm using the Google Speech-to-Text API from the googleapis package, but I couldn't find any documentation (for Dart and Flutter) explaining how to use a local audio file from the application's assets folder as the audio data when sending a RecognizeRequest.fromJson. How can I use a local file in place of the audio content in _json in the code below? Thanks in advance.
final httpClient = await clientViaServiceAccount(_credentials, _scopes);
try {
  final speech2Text = SpeechApi(httpClient);
  final _json = {
    "config": {
      "encoding": "FLAC",
      "sampleRateHertz": 16000,
      "languageCode": "en-US",
      "enableWordTimeOffsets": false
    },
    // Remote audio: how do I point this at a local asset instead?
    "audio": {"uri": "gs://cloud-samples-tests/speech/brooklyn.flac"}
  };
  final _recognizeRequest = RecognizeRequest.fromJson(_json);
  final response = await speech2Text.speech.recognize(_recognizeRequest);
  for (var result in response.results) {
    print(result.toJson());
  }
} finally {
  httpClient.close();
}
I finally managed to do it by following the example from the google_speech package. First, declare the audio asset in pubspec.yaml:
flutter:
  assets:
    - assets/brooklyn.flac
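The helpers below also need the path_provider package for getApplicationDocumentsDirectory, so it has to be listed under dependencies as well (the version here is only an example):

dependencies:
  path_provider: ^2.0.0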
Then these two helpers copy the asset into the documents directory and read it back as bytes:

import 'dart:io';
import 'package:flutter/services.dart' show rootBundle;
import 'package:path_provider/path_provider.dart';

// Copy the asset out of the bundle into the documents directory,
// so it can be read back as a regular file.
Future<void> _copyFileFromAssets(String name) async {
  final data = await rootBundle.load('assets/$name');
  final directory = await getApplicationDocumentsDirectory();
  final path = '${directory.path}/$name';
  await File(path).writeAsBytes(
      data.buffer.asUint8List(data.offsetInBytes, data.lengthInBytes));
}

// Return the raw bytes of the audio file, copying it from the
// assets first if it is not in the documents directory yet.
Future<List<int>> _getAudioContent(String name) async {
  final directory = await getApplicationDocumentsDirectory();
  final path = '${directory.path}/$name';
  if (!File(path).existsSync()) {
    await _copyFileFromAssets(name);
  }
  return File(path).readAsBytesSync().toList();
}
Finally, base64-encode the bytes and put them in the audio content field (base64Encode comes from dart:convert):

final audio = await _getAudioContent('brooklyn.flac');
final audio64 = base64Encode(audio); // from dart:convert

final _json = {
  "config": {
    "encoding": "FLAC",
    "sampleRateHertz": 16000,
    "languageCode": "en-US",
    "enableWordTimeOffsets": false
  },
  // "audio": {"uri": "gs://cloud-samples-tests/speech/brooklyn.flac"}
  "audio": {"content": audio64},
};
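For completeness, the request is then built and sent exactly as in the question; this sketch assumes the same httpClient and SpeechApi setup shown above:

final speech2Text = SpeechApi(httpClient);
final _recognizeRequest = RecognizeRequest.fromJson(_json);
final response = await speech2Text.speech.recognize(_recognizeRequest);
for (var result in response.results) {
  print(result.toJson());
}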
I hope this helps someone with a similar issue.