fix(audio): prime Safari mic graph; use raw PCM fallback; stabilize teardown

Route mic source through analyser into a silent sink to force processing on Safari/WebKit.

Add ScriptProcessorNode raw-PCM capture path for reliable RMS/entropy sampling.

Clamp/scale levels to avoid Infinity/opacity warnings and improve UI thresholds.

Deduplicate/guard AudioContext shutdown to prevent “Cannot close a closed AudioContext”.

Improve logging around context state, pipeline init, and capture source selection.
This commit is contained in:
LC mac
2026-02-11 00:32:42 +08:00
parent f52186f2e7
commit 20cf558e83
2 changed files with 717 additions and 0 deletions

View File

@@ -33,6 +33,7 @@ import CameraEntropy from './components/CameraEntropy';
import DiceEntropy from './components/DiceEntropy';
import { InteractionEntropy } from './lib/interactionEntropy';
import AudioEntropy from './AudioEntropy';
console.log("OpenPGP.js version:", openpgp.config.versionString);
interface StorageItem {
@@ -736,6 +737,17 @@ function App() {
</div>
)}
{/* Audio Entropy Component */}
{entropySource === 'audio' && !generatedSeed && (
<AudioEntropy
key={`audio-${resetCounter}`}
wordCount={seedWordCount}
onEntropyGenerated={handleEntropyGenerated}
onCancel={() => setEntropySource(null)}
interactionEntropy={interactionEntropyRef.current}
/>
)}
{/* Generated Seed Display + Destination Selector */}
{generatedSeed && (
<div className="space-y-4">

705
src/AudioEntropy.tsx Normal file
View File

@@ -0,0 +1,705 @@
import React, { useState, useRef, useEffect } from 'react';
import { Mic, X, CheckCircle2 } from 'lucide-react';
import { InteractionEntropy } from './lib/interactionEntropy';
// Summary statistics reported to the parent after a capture/analysis cycle.
interface AudioStats {
sampleRate: number; // AudioContext sample rate in Hz (falls back to 48000)
duration: number; // capture duration in milliseconds (derived from frame count)
peakAmplitude: number; // max |sample| over the capture, samples in [-1, 1]
rmsAmplitude: number; // root-mean-square amplitude over all samples
zeroCrossings: number; // sign-change count — rough frequency-content measure
frequencyBands: number[]; // 8 bands, normalized 0–100
spectralEntropy: number; // Shannon entropy over the 8 bands (max 3 bits)
interactionSamples: number; // count of interaction-timing samples mixed in
totalBits: number; // advertised entropy of the final seed (constant 256)
}
// Props for the audio-entropy capture component.
interface AudioEntropyProps {
wordCount: 12 | 24; // BIP39 mnemonic length to generate
onEntropyGenerated: (mnemonic: string, stats: AudioStats) => void; // called when the user accepts the seed
onCancel: () => void; // called on user cancel or unrecoverable mic error
interactionEntropy: InteractionEntropy; // extra timing entropy mixed into the seed
}
const AudioEntropy: React.FC<AudioEntropyProps> = ({
wordCount,
onEntropyGenerated,
onCancel,
interactionEntropy
}) => {
// UI flow: permission prompt → live capture → processing spinner → stats review.
const [step, setStep] = useState<'permission' | 'capture' | 'processing' | 'stats'>('permission');
const [stream, setStream] = useState<MediaStream | null>(null);
// 0–100 level driving the meter/visualizer (scaled, clamped RMS).
const [audioLevel, setAudioLevel] = useState(0);
// Gates the "Capture (3s)" button until some signal is present.
const [captureEnabled, setCaptureEnabled] = useState(false);
const [stats, setStats] = useState<AudioStats | null>(null);
const [generatedMnemonic, setGeneratedMnemonic] = useState('');
const [error, setError] = useState('');
const [captureProgress, setCaptureProgress] = useState(0);
// Web Audio plumbing lives in refs so the RAF loop and teardown always see
// current values without re-rendering.
const audioContextRef = useRef<AudioContext | null>(null);
const analyserRef = useRef<AnalyserNode | null>(null);
const animationRef = useRef<number>(); // requestAnimationFrame handle
const audioDataRef = useRef<Float32Array[]>([]); // analyser time-domain frames
const audioLevelLoggedRef = useRef(false); // first-frame debug log latch
const scriptProcessorRef = useRef<ScriptProcessorNode | null>(null);
const rawAudioDataRef = useRef<Float32Array[]>([]); // raw PCM chunks (Safari fallback path)
const frameCounterRef = useRef(0); // throttles ScriptProcessor debug logging
const teardownAudio = async () => {
if (animationRef.current) {
cancelAnimationFrame(animationRef.current);
animationRef.current = undefined;
}
if (stream) {
stream.getTracks().forEach(t => t.stop());
setStream(null);
}
if (scriptProcessorRef.current) {
(scriptProcessorRef.current as any).onaudioprocess = null;
try { scriptProcessorRef.current.disconnect(); } catch {}
scriptProcessorRef.current = null;
}
analyserRef.current = null;
const ctx = audioContextRef.current;
audioContextRef.current = null;
if (ctx && ctx.state !== 'closed') {
try { await ctx.close(); } catch {}
}
};
/**
 * Request microphone access and build the Web Audio graph:
 * mic → analyser → muted gain → MediaStreamDestination (a silent sink that
 * keeps WebKit "pulling" the graph), plus a ScriptProcessor branch that taps
 * raw PCM as a Safari fallback. On failure, reports the error and cancels.
 */
const requestMicrophoneAccess = async () => {
try {
console.log('🎤 Requesting microphone access...');
// Clean up any existing audio context first
await teardownAudio();
// Raw, unprocessed mono input: DSP is disabled so entropy reflects the
// actual room noise rather than the browser's cleaned-up signal.
const mediaStream = await navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: false,
noiseSuppression: false,
autoGainControl: false,
sampleRate: { ideal: 44100 }, // Safari prefers this
channelCount: 1,
},
});
console.log('✅ Microphone access granted');
// Set up Web Audio API (webkit prefix covers older Safari)
const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)();
const analyser = audioContext.createAnalyser();
// Back to normal analyser settings
analyser.fftSize = 2048; // back to normal
analyser.smoothingTimeConstant = 0.3;
analyser.minDecibels = -100;
analyser.maxDecibels = 0;
analyser.channelCount = 1;
const source = audioContext.createMediaStreamSource(mediaStream);
// Silent sink that still "pulls" the graph (no speaker output)
const silentGain = audioContext.createGain();
silentGain.gain.value = 0;
const silentSink = audioContext.createMediaStreamDestination();
// IMPORTANT: analyser must be in the pulled path
source.connect(analyser);
analyser.connect(silentGain);
silentGain.connect(silentSink);
// Safari fallback: ScriptProcessor gets RAW mic PCM
// (deprecated API, but still the most reliable raw-PCM tap here)
try {
const scriptProcessor = (audioContext as any).createScriptProcessor(1024, 1, 1);
scriptProcessor.onaudioprocess = (event: AudioProcessingEvent) => {
const inputBuffer = event.inputBuffer.getChannelData(0); // RAW MIC DATA!
// Append for entropy
rawAudioDataRef.current.push(new Float32Array(inputBuffer));
// Calc RMS from raw data
let sum = 0;
for (let i = 0; i < inputBuffer.length; i++) {
sum += inputBuffer[i] * inputBuffer[i];
}
const rawRms = Math.sqrt(sum / inputBuffer.length);
// Update state via postMessage (React-safe)
// NOTE(review): comment above is stale — this is a plain setState,
// throttled to ~10% of callbacks to limit re-renders.
if (Math.random() < 0.1) { // Throttle
setAudioLevel(Math.min(rawRms * 2000, 100));
}
// Deterministic logging every 30 frames
if (frameCounterRef.current++ % 30 === 0) {
console.log('🎙️ RAW mic RMS:', rawRms.toFixed(4), 'Sample:', inputBuffer.slice(0,5));
}
};
// ScriptProcessor branch also pulled
source.connect(scriptProcessor);
scriptProcessor.connect(silentGain); // pull it via the same sink path
scriptProcessorRef.current = scriptProcessor;
console.log('✅ ScriptProcessor active (Safari fallback)');
} catch (e) {
console.log('⚠️ ScriptProcessor not supported');
}
console.log('🎧 Pipeline primed:', {
sampleRate: audioContext.sampleRate,
state: audioContext.state,
fftSize: analyser.fftSize,
channels: analyser.channelCount,
});
audioContextRef.current = audioContext;
analyserRef.current = analyser;
setStream(mediaStream);
// Resume context
// (contexts created outside a user gesture may start 'suspended')
if (audioContext.state === 'suspended') {
await audioContext.resume();
console.log('▶️ Audio context resumed:', audioContext.state);
}
// Give pipeline 300ms to fill buffer
setTimeout(() => {
if (analyserRef.current) {
console.log('▶️ Starting analysis after buffer fill');
startAudioAnalysis();
setStep('capture');
}
}, 300);
} catch (err: any) {
// NOTE(review): any getUserMedia failure (NotFound, NotReadable, …) is
// reported as "access denied" — consider branching on err.name.
console.error('❌ Microphone error:', err);
setError(`Microphone access denied: ${err.message}`);
setTimeout(() => onCancel(), 2000);
}
};
const startAudioAnalysis = () => {
if (!analyserRef.current) {
console.error('❌ No analyser');
return;
}
console.log('✅ Analysis loop started');
const analyze = () => {
if (!analyserRef.current) return;
// Use FLOAT data (more precise than Byte)
const bufferLength = analyserRef.current.frequencyBinCount;
const timeData = new Float32Array(bufferLength);
const freqData = new Float32Array(bufferLength);
analyserRef.current.getFloatTimeDomainData(timeData);
analyserRef.current.getFloatFrequencyData(freqData);
// RMS from time domain (-1 to 1 range)
let sum = 0;
for (let i = 0; i < bufferLength; i++) {
sum += timeData[i] * timeData[i];
}
const rms = Math.sqrt(sum / bufferLength);
const level = Math.min(rms * 2000, 100); // Scale for visibility
// Proper dBFS to linear energy
let freqEnergy = 0;
let activeBins = 0;
for (let i = 0; i < bufferLength; i++) {
const dB = freqData[i];
if (dB > -100) { // Ignore silence floor
const linear = Math.pow(10, dB / 20); // dB → linear amplitude
freqEnergy += linear * linear; // Power
activeBins++;
}
}
const freqRms = activeBins > 0 ? Math.sqrt(freqEnergy / activeBins) : 0;
const freqLevel = Math.min(freqRms * 1000, 50);
const finalLevel = Math.max(level, freqLevel);
// CLAMP
const clampedLevel = Math.min(Math.max(finalLevel, 0), 100);
// Log first few + random
if (!audioLevelLoggedRef.current) {
audioLevelLoggedRef.current = true;
console.log('📊 First frame:', {
rms: rms.toFixed(4),
level: level.toFixed(1),
timeSample: timeData.slice(0, 5),
freqSample: freqData.slice(0, 5)
});
} else if (Math.random() < 0.03) {
console.log('🎵 Level:', clampedLevel.toFixed(1), 'RMS:', rms.toFixed(4));
}
setAudioLevel(clampedLevel);
setCaptureEnabled(clampedLevel > 1); // Lower threshold
animationRef.current = requestAnimationFrame(analyze);
};
analyze();
};
// Auto-start analysis when the capture step becomes active.
// Fix: a ref (`analyserRef.current`) in a dependency array is inert — ref
// mutations don't trigger re-renders, so React never re-evaluates the effect
// for it. Keying on `step` alone covers the real trigger (setStep('capture')
// happens only after the analyser is installed).
useEffect(() => {
if (step === 'capture' && analyserRef.current && !animationRef.current) {
console.log('🎬 useEffect: Starting audio analysis');
startAudioAnalysis();
}
}, [step]);
/**
 * Capture ~3 seconds of audio: poll the analyser every 50ms while the
 * ScriptProcessor (if active) accumulates raw PCM, prefer the raw data,
 * then analyze, derive the mnemonic, and tear the audio graph down.
 */
const captureAudioEntropy = async () => {
// Ensure audio context is running
if (audioContextRef.current && audioContextRef.current.state === 'suspended') {
await audioContextRef.current.resume();
console.log('▶️ Audio context resumed on capture');
}
setStep('processing');
setCaptureProgress(0);
console.log('🎙️ Capturing audio entropy...');
// Capture 3 seconds of audio data
const captureDuration = 3000; // 3 seconds
const sampleInterval = 50; // Sample every 50ms
const totalSamples = captureDuration / sampleInterval;
// Both buffers reset here, so everything appended afterwards belongs to
// this capture window.
audioDataRef.current = [];
rawAudioDataRef.current = [];
for (let i = 0; i < totalSamples; i++) {
await new Promise(resolve => setTimeout(resolve, sampleInterval));
// Try to get data from analyser first, fall back to raw audio data
if (analyserRef.current) {
const bufferLength = analyserRef.current!.frequencyBinCount;
const timeData = new Float32Array(bufferLength);
analyserRef.current!.getFloatTimeDomainData(timeData);
// Store Float32Array directly (no conversion needed)
audioDataRef.current.push(new Float32Array(timeData));
}
setCaptureProgress(((i + 1) / totalSamples) * 100);
}
// Use raw audio data if available (from ScriptProcessor)
// NOTE(review): `totalSamples` counts 50ms analyser polls, but raw chunks
// are 1024-sample ScriptProcessor buffers (~23ms at 44.1kHz), so keeping
// only the last `totalSamples` chunks retains ~1.4s of the 3s window.
// The buffer was cleared at capture start, so the whole array would be
// exactly the window — but the duration stat elsewhere assumes 50ms/frame,
// so both would need to change together.
if (rawAudioDataRef.current.length > 0) {
console.log('✅ Using raw audio data from ScriptProcessor:', rawAudioDataRef.current.length, 'samples');
audioDataRef.current = rawAudioDataRef.current.slice(-totalSamples); // Use most recent samples
}
console.log('✅ Audio captured:', audioDataRef.current.length, 'samples');
// Analyze captured audio
const audioStats = await analyzeAudioEntropy();
const mnemonic = await generateMnemonicFromAudio(audioStats);
setStats(audioStats);
setGeneratedMnemonic(mnemonic);
setStep('stats');
// Use teardownAudio for proper cleanup
await teardownAudio();
};
/**
 * Compute summary statistics (peak, RMS, zero crossings, 8-band spectrum,
 * spectral entropy) over the frames collected in audioDataRef.
 * Fixes: peak/RMS are computed in one pass instead of `Math.max(...spread)`,
 * which throws "too many arguments" / stack overflow on multi-second raw PCM
 * captures (~130k samples); an empty capture now yields zeros rather than
 * -Infinity / NaN.
 */
const analyzeAudioEntropy = async (): Promise<AudioStats> => {
// Flatten all captured frames into one sample list.
const allSamples: number[] = audioDataRef.current.flatMap(arr => Array.from(arr));
const sampleRate = audioContextRef.current?.sampleRate || 48000;
// Peak + RMS in a single pass; both are 0 for an empty capture.
let peakAmplitude = 0;
let sumSquares = 0;
for (const sample of allSamples) {
const magnitude = Math.abs(sample);
if (magnitude > peakAmplitude) peakAmplitude = magnitude;
sumSquares += sample * sample;
}
const rmsAmplitude = allSamples.length > 0 ? Math.sqrt(sumSquares / allSamples.length) : 0;
// Zero crossings (measure of frequency content)
let zeroCrossings = 0;
for (let i = 1; i < allSamples.length; i++) {
if ((allSamples[i] >= 0 && allSamples[i - 1] < 0) ||
(allSamples[i] < 0 && allSamples[i - 1] >= 0)) {
zeroCrossings++;
}
}
// Frequency analysis (simplified): mean |amplitude| per eighth of each frame.
const frequencyBands = Array(8).fill(0); // 8 bands
for (const frame of audioDataRef.current) {
const bufferLength = frame.length;
const bandSize = Math.floor(bufferLength / 8);
for (let band = 0; band < 8; band++) {
const start = band * bandSize;
const end = start + bandSize;
let bandEnergy = 0;
for (let i = start; i < end && i < bufferLength; i++) {
bandEnergy += Math.abs(frame[i]);
}
// Guard bandSize === 0 (frames shorter than 8 samples) to avoid NaN.
if (bandSize > 0) frequencyBands[band] += bandEnergy / bandSize;
}
}
// Normalize frequency bands to a 0–100 scale.
const maxBand = Math.max(...frequencyBands); // only 8 values — spread is safe here
if (maxBand > 0) {
for (let i = 0; i < frequencyBands.length; i++) {
frequencyBands[i] = (frequencyBands[i] / maxBand) * 100;
}
}
// Spectral entropy: Shannon entropy over the 8 normalized bands (max 3 bits).
let spectralEntropy = 0;
const total = frequencyBands.reduce((a, b) => a + b, 0);
if (total > 0) {
for (const band of frequencyBands) {
if (band > 0) {
const p = band / total;
spectralEntropy -= p * Math.log2(p);
}
}
}
return {
sampleRate,
// NOTE(review): assumes one frame per 50ms analyser poll; raw
// ScriptProcessor chunks are ~23ms at 44.1kHz, so this misstates the
// duration on that path — confirm which buffer feeds this.
duration: audioDataRef.current.length * 50, // milliseconds
peakAmplitude,
rmsAmplitude,
zeroCrossings,
frequencyBands,
spectralEntropy,
interactionSamples: interactionEntropy.getSampleCount().total,
totalBits: 256,
};
};
/**
 * Derive a BIP39 mnemonic by hashing the captured audio together with
 * interaction-timing entropy, 32 CSPRNG bytes, and a high-resolution
 * timestamp. Even if the audio is low-entropy, the crypto.getRandomValues()
 * contribution keeps the final seed at full strength.
 * Fix: hex-encode with plain string methods instead of Node's `Buffer`,
 * which is undefined in browser bundles without a polyfill.
 */
const generateMnemonicFromAudio = async (audioStats: AudioStats): Promise<string> => {
// Concatenate all captured frames and hash the raw PCM bytes.
const allAudioData = audioDataRef.current.flatMap(arr => Array.from(arr));
const audioHash = await crypto.subtle.digest(
'SHA-256',
new Float32Array(allAudioData).buffer
);
const interactionBytes = await interactionEntropy.getEntropyBytes();
const cryptoBytes = crypto.getRandomValues(new Uint8Array(32));
// Mix every source into one string, then hash to the final entropy.
const combined = [
Array.from(new Uint8Array(audioHash)).join(','),
audioStats.zeroCrossings.toString(),
audioStats.peakAmplitude.toString(),
performance.now().toString(),
Array.from(interactionBytes).join(','),
Array.from(cryptoBytes).join(','),
].join('|');
const encoder = new TextEncoder();
const data = encoder.encode(combined);
const hash = await crypto.subtle.digest('SHA-256', data);
const { entropyToMnemonic } = await import('bip39');
// 12 words need 128 bits (16 bytes); 24 words need 256 bits (32 bytes).
const entropyLength = wordCount === 12 ? 16 : 32;
const finalEntropy = new Uint8Array(hash).slice(0, entropyLength);
// Browser-safe hex encoding (no Node Buffer).
const entropyHex = Array.from(finalEntropy)
.map(byte => byte.toString(16).padStart(2, '0'))
.join('');
return entropyToMnemonic(entropyHex);
};
// Tear the audio graph down when the component unmounts.
useEffect(() => {
return () => {
// NOTE(review): this cleanup closes over the initial render's
// `teardownAudio`, whose `stream` state is still null — so live mic
// tracks may not be stopped if the component unmounts mid-capture.
// Mirroring the stream in a ref would make teardown always reach it.
teardownAudio();
};
}, []);
/**
 * Map the current audio level to a status line and accent color.
 * Tiers are checked loudest-first; below the lowest tier is "too quiet".
 */
const getStatusMessage = () => {
const tiers = [
{ min: 10, text: '✅ Excellent audio - ready!', color: '#39ff14' },
{ min: 5, text: '🟡 Good - speak or make noise', color: '#ffd700' },
{ min: 2, text: '🟠 Low - louder noise needed', color: '#ff9500' },
];
for (const tier of tiers) {
if (audioLevel > tier.min) {
return { text: tier.text, color: tier.color };
}
}
return { text: '🔴 Too quiet - tap desk/speak', color: '#ff006e' };
};
// Render one screen per step; the error banner overlays any step.
return (
<div className="space-y-4">
{/* Permission Screen */}
{step === 'permission' && (
<div className="p-6 bg-[#16213e] rounded-xl border-2 border-[#00f0ff30] space-y-4">
<div className="text-center space-y-2">
<Mic size={48} className="mx-auto text-[#00f0ff]" />
<h3 className="text-sm font-bold text-[#00f0ff] uppercase">Microphone Permission Needed</h3>
<span className="px-3 py-1 bg-[#ff006e30] border border-[#ff006e] text-[#ff006e] rounded-full text-[10px] font-bold uppercase">
Beta Feature
</span>
</div>
<div className="space-y-2 text-xs text-[#6ef3f7]">
<p>To generate entropy, we need:</p>
<ul className="list-disc list-inside space-y-1 pl-2">
<li>Microphone access to capture ambient noise</li>
<li>Audio data processed locally (never transmitted)</li>
<li>3 seconds of audio capture</li>
<li>Microphone auto-closes after use</li>
</ul>
</div>
<div className="flex gap-3">
<button
onClick={requestMicrophoneAccess}
className="flex-1 py-2.5 bg-[#00f0ff] text-[#0a0a0f] rounded-lg font-bold text-sm hover:shadow-[0_0_20px_rgba(0,240,255,0.5)] transition-all"
>
Allow Microphone
</button>
<button
onClick={onCancel}
className="flex-1 py-2.5 bg-[#16213e] border-2 border-[#ff006e] text-[#ff006e] rounded-lg font-bold text-sm hover:bg-[#ff006e20] transition-all"
>
Cancel
</button>
</div>
</div>
)}
{/* Capture Screen */}
{step === 'capture' && (
<div className="space-y-4">
{/* Waveform Visualization */}
<div className="p-6 bg-[#16213e] rounded-xl border-2 border-[#00f0ff30]">
<div className="flex items-center justify-center h-32 relative">
{/* Animated audio level bars — Math.random() re-rolls each render so the
meter shimmers even at a steady level (cosmetic only) */}
<div className="flex items-end gap-1 h-full">
{[...Array(20)].map((_, i) => (
<div
key={i}
className="w-2 bg-[#00f0ff] rounded-t transition-all"
style={{
height: `${Math.max(10, audioLevel * (0.5 + Math.random() * 0.5))}%`,
opacity: 0.3 + (audioLevel / 100) * 0.7, // 0.3–1.0 since audioLevel is clamped to 100
}}
/>
))}
</div>
</div>
</div>
{/* Status */}
{/* Level readout, colored progress bar, and the tiered status message */}
<div className="p-4 bg-[#16213e] rounded-xl border-2 border-[#00f0ff30] space-y-3">
<div className="text-xs text-[#6ef3f7] space-y-1">
<p className="font-bold text-[#00f0ff]">Instructions:</p>
<p>Make noise: tap desk, rustle paper, speak, or play music</p>
</div>
<div className="space-y-2">
<div className="flex items-center justify-between text-xs">
<span className="text-[#00f0ff]">Audio Level:</span>
<span className="font-mono text-[#00f0ff]">{audioLevel.toFixed(1)}%</span>
</div>
<div className="w-full bg-[#0a0a0f] rounded-full h-2 overflow-hidden">
<div
className="h-full transition-all"
style={{
width: `${audioLevel}%`,
backgroundColor: getStatusMessage().color,
}}
/>
</div>
<div
className="text-xs font-medium"
style={{ color: getStatusMessage().color }}
>
{getStatusMessage().text}
</div>
</div>
<div className="flex gap-3">
{/* Capture stays disabled until the analysis loop sees a minimal signal */}
<button
onClick={captureAudioEntropy}
disabled={!captureEnabled}
className="flex-1 py-2.5 bg-gradient-to-r from-[#ff006e] to-[#ff4d8f] text-white text-sm rounded-lg font-bold uppercase disabled:opacity-30 disabled:cursor-not-allowed hover:shadow-[0_0_20px_rgba(255,0,110,0.5)] transition-all"
>
<Mic className="inline mr-2" size={16} />
Capture (3s)
</button>
<button
onClick={onCancel}
className="px-4 py-2.5 bg-[#16213e] border-2 border-[#ff006e] text-[#ff006e] rounded-lg font-bold text-sm hover:bg-[#ff006e20] transition-all"
>
<X size={16} />
</button>
</div>
</div>
</div>
)}
{/* Processing Screen */}
{step === 'processing' && (
<div className="p-6 bg-[#16213e] rounded-xl border-2 border-[#00f0ff30] text-center space-y-3">
<div className="relative w-16 h-16 mx-auto">
<div className="animate-spin w-16 h-16 border-4 border-[#00f0ff30] border-t-[#00f0ff] rounded-full" />
<Mic className="absolute inset-0 m-auto text-[#00f0ff]" size={24} />
</div>
<p className="text-sm text-[#00f0ff]">Capturing audio entropy...</p>
<div className="w-full bg-[#0a0a0f] rounded-full h-2">
<div
className="h-full bg-[#00f0ff] rounded-full transition-all"
style={{ width: `${captureProgress}%` }}
/>
</div>
<p className="text-xs text-[#6ef3f7]">{captureProgress.toFixed(0)}%</p>
</div>
)}
{/* Stats Display */}
{step === 'stats' && stats && (
<div className="p-6 bg-[#16213e] rounded-xl border-2 border-[#39ff14] shadow-[0_0_30px_rgba(57,255,20,0.3)] space-y-4">
<div className="flex items-center gap-2 text-[#39ff14]">
<CheckCircle2 size={24} />
<h3 className="text-sm font-bold uppercase">Audio Entropy Analysis</h3>
</div>
<div className="space-y-3 text-xs">
<div>
<p className="text-[#00f0ff] font-bold mb-1">Primary Source:</p>
<p className="text-[#6ef3f7]">Microphone Ambient Noise</p>
</div>
<div>
<p className="text-[#00f0ff] font-bold mb-2">AUDIO METRICS:</p>
<div className="grid grid-cols-2 gap-2 font-mono text-[10px]">
<div>Sample Rate:</div>
<div className="text-[#39ff14]">{stats.sampleRate} Hz</div>
<div>Duration:</div>
<div className="text-[#39ff14]">{stats.duration}ms</div>
<div>Peak Amplitude:</div>
<div className="text-[#39ff14]">{stats.peakAmplitude.toFixed(3)}</div>
<div>RMS Amplitude:</div>
<div className="text-[#39ff14]">{stats.rmsAmplitude.toFixed(3)}</div>
<div>Zero Crossings:</div>
<div className="text-[#39ff14]">{stats.zeroCrossings.toLocaleString()}</div>
<div>Spectral Entropy:</div>
<div className="text-[#39ff14]">{stats.spectralEntropy.toFixed(2)}/3.00</div>
</div>
</div>
<div>
<p className="text-[#00f0ff] font-bold mb-2">FREQUENCY DISTRIBUTION:</p>
{/* Bands are pre-normalized to 0–100, so they map directly to bar height */}
<div className="flex items-end justify-between h-16 gap-1">
{stats.frequencyBands.map((val, i) => (
<div
key={i}
className="flex-1 bg-[#00f0ff] rounded-t"
style={{ height: `${val}%` }}
/>
))}
</div>
<div className="flex justify-between text-[9px] text-[#6ef3f7] mt-1">
<span>Low</span>
<span>High</span>
</div>
</div>
<div>
<p className="text-[#00f0ff] font-bold mb-2">GENERATED SEED:</p>
<div className="p-3 bg-[#0a0a0f] rounded-lg border border-[#39ff1450]">
{/* "blur-sensitive" class presumably blurs until hover — defined elsewhere */}
<p
className="font-mono text-[10px] text-[#39ff14] blur-sensitive"
title="Hover to reveal"
>
{generatedMnemonic}
</p>
<p className="text-[9px] text-[#6ef3f7] mt-1">
👆 Hover to reveal - Write this down securely
</p>
</div>
</div>
<div>
<p className="text-[#00f0ff] font-bold mb-2">HOW SEED IS GENERATED:</p>
<div className="space-y-1 text-[10px] text-[#6ef3f7]">
{/* NOTE(review): Array.prototype.flat() does not flatten Float32Arrays,
so .flat().length is the frame COUNT, not the sample count — the KB
figure shown here is far too small. */}
<div>1. Captured {stats.duration}ms of audio ({(audioDataRef.current.flat().length / 1024).toFixed(1)}KB)</div>
<div>2. Analyzed {stats.zeroCrossings.toLocaleString()} zero crossings</div>
<div>3. Extracted frequency spectrum (8 bands)</div>
<div>4. Mixed with {stats.interactionSamples} interaction samples</div>
<div>5. Enhanced with crypto.getRandomValues() (32 bytes)</div>
<div>6. Final hash {wordCount === 12 ? '128' : '256'} bits {wordCount} BIP39 words</div>
</div>
</div>
<div>
<p className="text-[#00f0ff] font-bold mb-2">MIXED WITH:</p>
<div className="space-y-1 text-[#6ef3f7]">
<div>- crypto.getRandomValues() </div>
<div>- performance.now() </div>
<div>- Interaction timing ({stats.interactionSamples} samples) </div>
</div>
</div>
<div className="pt-2 border-t border-[#00f0ff30]">
<div className="flex justify-between items-center">
<span className="text-[#00f0ff] font-bold">Total Entropy:</span>
<span className="text-lg font-bold text-[#39ff14]">{stats.totalBits} bits</span>
</div>
</div>
</div>
<div className="pt-4 border-t border-[#00f0ff30] space-y-3">
<button
onClick={() => onEntropyGenerated(generatedMnemonic, stats)}
className="w-full py-3 bg-gradient-to-r from-[#ff006e] to-[#ff4d8f] text-white text-sm rounded-xl font-bold uppercase hover:shadow-[0_0_30px_rgba(255,0,110,0.8)] transition-all"
>
Continue with this Seed
</button>
{/* "Capture Again" rewinds to the permission step; mic was already torn
down after capture, so access must be re-requested */}
<button
onClick={() => {
setStep('permission');
setStats(null);
setGeneratedMnemonic('');
setAudioLevel(0);
audioDataRef.current = [];
audioLevelLoggedRef.current = false;
}}
className="w-full py-2 bg-[#16213e] border-2 border-[#00f0ff] text-[#00f0ff] rounded-lg text-sm font-bold hover:bg-[#00f0ff20] transition-all"
>
Capture Again
</button>
</div>
</div>
)}
{error && (
<div className="p-4 bg-[#16213e] border-2 border-[#ff006e] rounded-lg">
<p className="text-xs text-[#ff006e]">{error}</p>
</div>
)}
</div>
);
};
export default AudioEntropy;