
자동화 툴 - 공부해보기 (automation tool — to study)
https://www.youtube.com/watch?v=ywH7JIK34Tg
- Yeji Kim
import tensorflow as tf
# Assuming `model` is your trained model
# Persist the trained Keras model in TensorFlow SavedModel format so the
# Flask server below can reload it for inference. NOTE(review): `model` is
# not defined in this snippet — it comes from a prior training step.
model.save('path/to/saved_model')
# Install the server dependencies first (shell command): pip install flask tensorflow
# Flask inference server: accepts a WAV upload and returns the model's
# transcription as JSON (see the /predict route below).
from flask import Flask, request, jsonify
import tensorflow as tf
import numpy as np
import librosa
# Single application instance serving the /predict endpoint.
app = Flask(__name__)
# Load the saved model once at startup; the path must match the one used
# when the model was saved with model.save(...).
model = tf.keras.models.load_model('path/to/saved_model')
def preprocess_audio(file_path):
    """Load an audio file and shape it for the model.

    Resamples to 16 kHz, pads with zeros or truncates to exactly 16000
    samples (1 second), and adds a leading batch axis.

    Args:
        file_path: Path to an audio file readable by librosa.

    Returns:
        np.ndarray of shape (1, 16000).
    """
    # Resample to the 16 kHz rate the model expects.
    audio, sr = librosa.load(file_path, sr=16000)
    # `size` is keyword-only in librosa >= 0.10; passing it positionally
    # raises a TypeError on current librosa versions.
    audio = librosa.util.fix_length(audio, size=16000)
    # The model expects a batch dimension: (1, 16000).
    audio = np.expand_dims(audio, axis=0)
    return audio
@app.route('/predict', methods=['POST'])
def predict():
    """Accept an uploaded audio file and return its transcription as JSON.

    Expects a multipart/form-data POST with the audio under the key 'file'.
    Returns {"transcription": ...} on success, or {"error": ...} with
    HTTP 400 when no file was provided.
    """
    import os
    import tempfile

    if 'file' not in request.files:
        return jsonify({"error": "No file provided"}), 400
    file = request.files['file']
    # Use a unique temporary file instead of a fixed 'temp.wav' so that
    # concurrent requests don't overwrite each other's uploads.
    fd, file_path = tempfile.mkstemp(suffix='.wav')
    os.close(fd)
    try:
        file.save(file_path)
        audio = preprocess_audio(file_path)
        prediction = model.predict(audio)
        transcription = decode_prediction(prediction)
    finally:
        # Always clean up the temp file, even if preprocessing or
        # inference raises.
        os.remove(file_path)
    return jsonify({"transcription": transcription})
def decode_prediction(prediction):
    """Convert raw model output into a human-readable transcript.

    Placeholder implementation: real decoding (e.g. greedy or beam-search
    CTC over the model's vocabulary) still needs to be plugged in here.
    """
    placeholder_transcript = "decoded text"
    return placeholder_transcript
if __name__ == '__main__':
    # Development server only: debug=True enables the interactive debugger
    # and auto-reload, and must not be used in production.
    # NOTE(review): to reach this server from a phone/emulator the app
    # likely needs host='0.0.0.0' — confirm against the deployment setup.
    app.run(debug=True)
# pubspec.yaml fragment for the Flutter client below.
dependencies:
  flutter:
    sdk: flutter
  http: ^0.13.3           # multipart upload to the Flask /predict endpoint
  # NOTE(review): audioplayers is playback-only; a recording package
  # (e.g. `record` or `flutter_sound`) is needed for the recorder calls
  # used in the Dart code — confirm before shipping.
  audioplayers: ^0.20.1
  path_provider: ^2.0.1   # temp directory for the recorded WAV file
import 'dart:convert';
import 'dart:io';

import 'package:audioplayers/audioplayers.dart';
import 'package:flutter/material.dart';
import 'package:http/http.dart' as http;
import 'package:path_provider/path_provider.dart';
// App entry point: inflate the root widget.
void main() => runApp(MyApp());
/// Root widget of the speech-to-text demo app.
class MyApp extends StatelessWidget {
  // Forward an optional key so callers can const-construct this widget
  // (fixes the use_key_in_widget_constructors lint; backward compatible).
  const MyApp({super.key});

  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      home: SpeechToTextScreen(),
    );
  }
}
/// Screen that records audio, uploads it to the backend, and shows the
/// transcription it returns.
class SpeechToTextScreen extends StatefulWidget {
  // Forward an optional key (use_key_in_widget_constructors lint).
  const SpeechToTextScreen({super.key});

  // Return the public State<T> type instead of leaking the library-private
  // state class through the public API.
  @override
  State<SpeechToTextScreen> createState() => _SpeechToTextScreenState();
}
class _SpeechToTextScreenState extends State<SpeechToTextScreen> {
  // NOTE(review): audioplayers' AudioPlayer is a playback-only API; the
  // startRecorder/stopRecorder calls below do not exist on it and will not
  // compile. A dedicated recording package (e.g. `record` or
  // `flutter_sound`) is needed here — confirm and swap in before shipping.
  final AudioPlayer _audioPlayer = AudioPlayer();
  bool _isRecording = false;
  String _transcription = "";

  @override
  void dispose() {
    // Release the native player resources when the screen is torn down;
    // the original code leaked the player.
    _audioPlayer.dispose();
    super.dispose();
  }

  /// Starts recording into a temp WAV file and flips the UI into
  /// "recording" mode.
  Future<void> _recordAudio() async {
    Directory tempDir = await getTemporaryDirectory();
    String tempPath = '${tempDir.path}/temp.wav';
    // Placeholder — see the NOTE on the class: this method does not exist
    // on audioplayers' AudioPlayer.
    await _audioPlayer.startRecorder(toFile: tempPath);
    // The widget may have been disposed while we awaited; calling setState
    // on an unmounted State throws.
    if (!mounted) return;
    setState(() {
      _isRecording = true;
    });
  }

  /// Stops recording and uploads the captured file to the backend.
  Future<void> _stopRecording() async {
    // Placeholder — see the NOTE on the class: this method does not exist
    // on audioplayers' AudioPlayer.
    await _audioPlayer.stopRecorder();
    if (!mounted) return;
    setState(() {
      _isRecording = false;
    });
    // Re-derive the same temp path _recordAudio wrote to, then upload it.
    Directory tempDir = await getTemporaryDirectory();
    String tempPath = '${tempDir.path}/temp.wav';
    File audioFile = File(tempPath);
    await _sendToServer(audioFile);
  }

  /// POSTs [audioFile] as multipart form data to the Flask /predict
  /// endpoint and stores the transcription (or an error message) in state.
  Future<void> _sendToServer(File audioFile) async {
    var request = http.MultipartRequest(
      'POST',
      Uri.parse('http://your_server_ip:your_server_port/predict'),
    );
    // The server reads the upload from request.files['file'].
    request.files.add(await http.MultipartFile.fromPath('file', audioFile.path));
    var response = await request.send();
    if (response.statusCode == 200) {
      var responseData = await response.stream.bytesToString();
      // The server responds with JSON {"transcription": "..."} — extract
      // the text instead of displaying the raw JSON body.
      var transcription =
          jsonDecode(responseData)['transcription'] as String? ?? '';
      if (!mounted) return;
      setState(() {
        _transcription = transcription;
      });
    } else {
      if (!mounted) return;
      setState(() {
        _transcription = "Error: ${response.statusCode}";
      });
    }
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(title: Text("Speech to Text")),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: [
            _isRecording
                ? ElevatedButton(
                    onPressed: _stopRecording,
                    child: Text("Stop Recording"),
                  )
                : ElevatedButton(
                    onPressed: _recordAudio,
                    child: Text("Start Recording"),
                  ),
            SizedBox(height: 20),
            Text(_transcription),
          ],
        ),
      ),
    );
  }
}