This commit is contained in:
2026-02-05 23:43:19 -05:00
parent 168e6d5a61
commit 42a1ddf458
12 changed files with 746 additions and 44 deletions

View File

@@ -0,0 +1,93 @@
/**
* MergedWaveform — unified progress bar + waveform display
*
* Shows waveform bars coloured to indicate played vs unplayed portions.
* The played section doubles as the progress indicator, replacing the
* separate progress bar. Click-to-seek is supported.
*/
import { createSignal, createEffect, onCleanup } from "solid-js"
import { getWaveformData, getWaveformDataSync } from "../utils/audio-waveform"
/** Props for the MergedWaveform component. */
type MergedWaveformProps = {
  /** Audio URL — used to generate or retrieve waveform data */
  audioUrl: string
  /** Current playback position in seconds */
  position: number
  /** Total duration in seconds */
  duration: number
  /** Whether audio is currently playing (affects played-section colour) */
  isPlaying: boolean
  /** Number of data points / columns (defaults to 64) */
  resolution?: number
  /** Callback when the user clicks to seek; receives the target position in seconds */
  onSeek?: (seconds: number) => void
}

/** Block characters for waveform amplitude levels, quietest first. */
const BARS = [".", "-", "~", "=", "#"]
/**
 * Unified progress bar + waveform display.
 *
 * Renders waveform bars coloured to distinguish the played portion (which
 * doubles as the progress indicator) from the unplayed portion, and supports
 * click-to-seek.
 */
export function MergedWaveform(props: MergedWaveformProps) {
  const resolution = () => props.resolution ?? 64

  // Waveform data — seeded with sync/cached data so something renders
  // immediately; async extraction replaces it once it completes.
  const [data, setData] = createSignal<number[]>(
    getWaveformDataSync(props.audioUrl, resolution()),
  )

  // When the audio URL (or resolution) changes: immediately swap in the
  // sync/cached waveform — otherwise the PREVIOUS track's waveform would
  // linger until async extraction resolves — then kick off async extraction
  // for real data.
  createEffect(() => {
    const url = props.audioUrl
    const res = resolution()
    if (!url) return
    setData(getWaveformDataSync(url, res))
    let cancelled = false
    getWaveformData(url, res).then((result) => {
      // Ignore results from an extraction superseded by a newer URL/res.
      if (!cancelled) setData(result)
    })
    onCleanup(() => {
      cancelled = true
    })
  })

  // Fraction of the track already played, clamped to [0, 1] and safe for
  // zero/negative duration or a transiently negative position.
  const playedRatio = () => {
    if (props.duration <= 0) return 0
    return Math.min(1, Math.max(0, props.position / props.duration))
  }

  /** Map a normalised amplitude (0-1) to its block character. */
  const toBar = (v: number) =>
    BARS[Math.min(BARS.length - 1, Math.floor(v * BARS.length))]

  const renderLine = () => {
    const d = data()
    const played = Math.floor(d.length * playedRatio())
    const playedColor = props.isPlaying ? "#6fa8ff" : "#7d8590"
    const futureColor = "#3b4252"
    const playedChars = d.slice(0, played).map(toBar).join("")
    const futureChars = d.slice(played).map(toBar).join("")
    // Fall back to a single space so both <text> nodes always render.
    return (
      <box flexDirection="row" gap={0}>
        <text fg={playedColor}>{playedChars || " "}</text>
        <text fg={futureColor}>{futureChars || " "}</text>
      </box>
    )
  }

  const handleClick = (event: { x: number }) => {
    const d = data()
    if (d.length === 0 || props.duration <= 0) {
      props.onSeek?.(0)
      return
    }
    // NOTE(review): assumes event.x is relative to this component's content
    // area; if it includes the border/padding columns, seeks are skewed by
    // ~2 cells — TODO confirm against the mouse-event contract.
    const ratio = Math.min(1, Math.max(0, event.x / d.length))
    props.onSeek?.(Math.round(props.duration * ratio))
  }

  return (
    <box border padding={1} onMouseDown={handleClick}>
      {renderLine()}
    </box>
  )
}

View File

@@ -4,7 +4,7 @@
* Right panel: episodes for the selected show * Right panel: episodes for the selected show
*/ */
import { createSignal, For, Show, createMemo } from "solid-js" import { createSignal, For, Show, createMemo, createEffect } from "solid-js"
import { useKeyboard } from "@opentui/solid" import { useKeyboard } from "@opentui/solid"
import { useFeedStore } from "../stores/feed" import { useFeedStore } from "../stores/feed"
import { format } from "date-fns" import { format } from "date-fns"
@@ -26,6 +26,9 @@ export function MyShowsPage(props: MyShowsPageProps) {
const [episodeIndex, setEpisodeIndex] = createSignal(0) const [episodeIndex, setEpisodeIndex] = createSignal(0)
const [isRefreshing, setIsRefreshing] = createSignal(false) const [isRefreshing, setIsRefreshing] = createSignal(false)
/** Threshold: load more when within this many items of the end */
const LOAD_MORE_THRESHOLD = 5
const shows = () => feedStore.getFilteredFeeds() const shows = () => feedStore.getFilteredFeeds()
const selectedShow = createMemo(() => { const selectedShow = createMemo(() => {
@@ -42,6 +45,19 @@ export function MyShowsPage(props: MyShowsPageProps) {
) )
}) })
// Detect when the user navigates near the bottom of the episode list and
// load the next page. Guards: do nothing without a selected show or any
// episodes, and never issue overlapping load requests.
createEffect(() => {
  const idx = episodeIndex()
  const eps = episodes()
  const show = selectedShow()
  if (!show || eps.length === 0) return
  const nearBottom = idx >= eps.length - LOAD_MORE_THRESHOLD
  if (nearBottom && feedStore.hasMoreEpisodes(show.id) && !feedStore.isLoadingMore()) {
    // Fire-and-forget: swallow rejections so a failure inside
    // loadMoreEpisodes cannot surface as an unhandled promise rejection.
    void feedStore.loadMoreEpisodes(show.id).catch(() => {})
  }
})
const formatDate = (date: Date): string => { const formatDate = (date: Date): string => {
return format(date, "MMM d, yyyy") return format(date, "MMM d, yyyy")
} }
@@ -231,6 +247,16 @@ export function MyShowsPage(props: MyShowsPageProps) {
</box> </box>
)} )}
</For> </For>
<Show when={feedStore.isLoadingMore()}>
<box paddingLeft={2} paddingTop={1}>
<text fg="yellow">Loading more episodes...</text>
</box>
</Show>
<Show when={!feedStore.isLoadingMore() && selectedShow() && feedStore.hasMoreEpisodes(selectedShow()!.id)}>
<box paddingLeft={2} paddingTop={1}>
<text fg="gray">Scroll down for more episodes</text>
</box>
</Show>
</scrollbox> </scrollbox>
</Show> </Show>
</Show> </Show>

View File

@@ -1,7 +1,6 @@
import { useKeyboard } from "@opentui/solid" import { useKeyboard } from "@opentui/solid"
import { PlaybackControls } from "./PlaybackControls" import { PlaybackControls } from "./PlaybackControls"
import { Waveform } from "./Waveform" import { MergedWaveform } from "./MergedWaveform"
import { createWaveform } from "../utils/waveform"
import { useAudio } from "../hooks/useAudio" import { useAudio } from "../hooks/useAudio"
import type { Episode } from "../types/episode" import type { Episode } from "../types/episode"
@@ -24,8 +23,6 @@ const SAMPLE_EPISODE: Episode = {
export function Player(props: PlayerProps) { export function Player(props: PlayerProps) {
const audio = useAudio() const audio = useAudio()
const waveform = () => createWaveform(64)
// The episode to display — prefer a passed-in episode, then the // The episode to display — prefer a passed-in episode, then the
// currently-playing episode, then fall back to the sample. // currently-playing episode, then fall back to the sample.
const episode = () => props.episode ?? audio.currentEpisode() ?? SAMPLE_EPISODE const episode = () => props.episode ?? audio.currentEpisode() ?? SAMPLE_EPISODE
@@ -86,7 +83,7 @@ export function Player(props: PlayerProps) {
<strong>Now Playing</strong> <strong>Now Playing</strong>
</text> </text>
<text fg="gray"> <text fg="gray">
{formatTime(audio.position())} / {formatTime(dur())} {formatTime(audio.position())} / {formatTime(dur())} ({progressPercent()}%)
</text> </text>
</box> </box>
@@ -100,27 +97,13 @@ export function Player(props: PlayerProps) {
</text> </text>
<text fg="gray">{episode().description}</text> <text fg="gray">{episode().description}</text>
<box flexDirection="column" gap={1}> <MergedWaveform
<box flexDirection="row" gap={1} alignItems="center"> audioUrl={episode().audioUrl}
<text fg="gray">Progress:</text> position={audio.position()}
<box flexGrow={1} height={1} backgroundColor="#2a2f3a"> duration={dur()}
<box isPlaying={audio.isPlaying()}
width={`${progressPercent()}%`} onSeek={(next: number) => audio.seek(next)}
height={1} />
backgroundColor={audio.isPlaying() ? "#6fa8ff" : "#7d8590"}
/>
</box>
<text fg="gray">{progressPercent()}%</text>
</box>
<Waveform
data={waveform()}
position={audio.position()}
duration={dur()}
isPlaying={audio.isPlaying()}
onSeek={(next: number) => audio.seek(next)}
/>
</box>
</box> </box>
<PlaybackControls <PlaybackControls

View File

@@ -20,12 +20,18 @@ import {
migrateSourcesFromLocalStorage, migrateSourcesFromLocalStorage,
} from "../utils/feeds-persistence" } from "../utils/feeds-persistence"
/** Max episodes to fetch on refresh */ /** Max episodes to load per page/chunk */
const MAX_EPISODES_REFRESH = 50 const MAX_EPISODES_REFRESH = 50
/** Max episodes to fetch on initial subscribe */ /** Max episodes to fetch on initial subscribe */
const MAX_EPISODES_SUBSCRIBE = 20 const MAX_EPISODES_SUBSCRIBE = 20
/** Cache of all parsed episodes per feed (feedId -> Episode[]) */
const fullEpisodeCache = new Map<string, Episode[]>()
/** Track how many episodes are currently loaded per feed */
const episodeLoadCount = new Map<string, number>()
/** Save feeds to file (async, fire-and-forget) */ /** Save feeds to file (async, fire-and-forget) */
function saveFeeds(feeds: Feed[]): void { function saveFeeds(feeds: Feed[]): void {
saveFeedsToFile(feeds).catch(() => {}) saveFeedsToFile(feeds).catch(() => {})
@@ -56,6 +62,7 @@ export function createFeedStore() {
sortDirection: "desc", sortDirection: "desc",
}) })
const [selectedFeedId, setSelectedFeedId] = createSignal<string | null>(null) const [selectedFeedId, setSelectedFeedId] = createSignal<string | null>(null)
const [isLoadingMore, setIsLoadingMore] = createSignal(false)
/** Get filtered and sorted feeds */ /** Get filtered and sorted feeds */
const getFilteredFeeds = (): Feed[] => { const getFilteredFeeds = (): Feed[] => {
@@ -132,8 +139,8 @@ export function createFeedStore() {
return allEpisodes return allEpisodes
} }
/** Fetch latest episodes from an RSS feed URL */ /** Fetch latest episodes from an RSS feed URL, caching all parsed episodes */
const fetchEpisodes = async (feedUrl: string, limit: number): Promise<Episode[]> => { const fetchEpisodes = async (feedUrl: string, limit: number, feedId?: string): Promise<Episode[]> => {
try { try {
const response = await fetch(feedUrl, { const response = await fetch(feedUrl, {
headers: { headers: {
@@ -144,7 +151,15 @@ export function createFeedStore() {
if (!response.ok) return [] if (!response.ok) return []
const xml = await response.text() const xml = await response.text()
const parsed = parseRSSFeed(xml, feedUrl) const parsed = parseRSSFeed(xml, feedUrl)
return parsed.episodes.slice(0, limit) const allEpisodes = parsed.episodes
// Cache all parsed episodes for pagination
if (feedId) {
fullEpisodeCache.set(feedId, allEpisodes)
episodeLoadCount.set(feedId, Math.min(limit, allEpisodes.length))
}
return allEpisodes.slice(0, limit)
} catch { } catch {
return [] return []
} }
@@ -152,9 +167,10 @@ export function createFeedStore() {
/** Add a new feed and auto-fetch latest 20 episodes */ /** Add a new feed and auto-fetch latest 20 episodes */
const addFeed = async (podcast: Podcast, sourceId: string, visibility: FeedVisibility = FeedVisibility.PUBLIC) => { const addFeed = async (podcast: Podcast, sourceId: string, visibility: FeedVisibility = FeedVisibility.PUBLIC) => {
const episodes = await fetchEpisodes(podcast.feedUrl, MAX_EPISODES_SUBSCRIBE) const feedId = crypto.randomUUID()
const episodes = await fetchEpisodes(podcast.feedUrl, MAX_EPISODES_SUBSCRIBE, feedId)
const newFeed: Feed = { const newFeed: Feed = {
id: crypto.randomUUID(), id: feedId,
podcast, podcast,
episodes, episodes,
visibility, visibility,
@@ -174,7 +190,7 @@ export function createFeedStore() {
const refreshFeed = async (feedId: string) => { const refreshFeed = async (feedId: string) => {
const feed = getFeed(feedId) const feed = getFeed(feedId)
if (!feed) return if (!feed) return
const episodes = await fetchEpisodes(feed.podcast.feedUrl, MAX_EPISODES_REFRESH) const episodes = await fetchEpisodes(feed.podcast.feedUrl, MAX_EPISODES_REFRESH, feedId)
setFeeds((prev) => { setFeeds((prev) => {
const updated = prev.map((f) => const updated = prev.map((f) =>
f.id === feedId ? { ...f, episodes, lastUpdated: new Date() } : f f.id === feedId ? { ...f, episodes, lastUpdated: new Date() } : f
@@ -194,6 +210,8 @@ export function createFeedStore() {
/** Remove a feed */ /** Remove a feed */
const removeFeed = (feedId: string) => { const removeFeed = (feedId: string) => {
fullEpisodeCache.delete(feedId)
episodeLoadCount.delete(feedId)
setFeeds((prev) => { setFeeds((prev) => {
const updated = prev.filter((f) => f.id !== feedId) const updated = prev.filter((f) => f.id !== feedId)
saveFeeds(updated) saveFeeds(updated)
@@ -283,18 +301,76 @@ export function createFeedStore() {
return id ? getFeed(id) : undefined return id ? getFeed(id) : undefined
} }
/** True when the cache holds more episodes for `feedId` than are currently loaded. */
const hasMoreEpisodes = (feedId: string): boolean => {
  const all = fullEpisodeCache.get(feedId)
  if (!all) return false
  return (episodeLoadCount.get(feedId) ?? 0) < all.length
}
/** Load the next chunk of episodes for a feed from the cache.
 * If no cache exists (e.g. after an app restart), re-fetches and parses the
 * full RSS feed. Best-effort: network/parse failures are swallowed and leave
 * state unchanged, so callers may fire-and-forget without risking an
 * unhandled rejection. */
const loadMoreEpisodes = async (feedId: string) => {
  if (isLoadingMore()) return
  const feed = getFeed(feedId)
  if (!feed) return
  setIsLoadingMore(true)
  try {
    let cached = fullEpisodeCache.get(feedId)
    // Cache miss (e.g. app restart) — re-fetch and parse the whole feed.
    if (!cached) {
      const response = await fetch(feed.podcast.feedUrl, {
        headers: {
          "Accept-Encoding": "identity",
          "Accept": "application/rss+xml, application/xml, text/xml, */*",
        },
      })
      if (!response.ok) return
      const xml = await response.text()
      const parsed = parseRSSFeed(xml, feed.podcast.feedUrl)
      cached = parsed.episodes
      fullEpisodeCache.set(feedId, cached)
      // Align the load count with what's already displayed.
      episodeLoadCount.set(feedId, feed.episodes.length)
    }
    const currentCount = episodeLoadCount.get(feedId) ?? feed.episodes.length
    const newCount = Math.min(currentCount + MAX_EPISODES_REFRESH, cached.length)
    if (newCount <= currentCount) return // nothing more to load
    episodeLoadCount.set(feedId, newCount)
    const episodes = cached.slice(0, newCount)
    setFeeds((prev) => {
      const updated = prev.map((f) =>
        f.id === feedId ? { ...f, episodes } : f
      )
      saveFeeds(updated)
      return updated
    })
  } catch {
    // Network or parse failure — swallow; load count and feeds unchanged.
  } finally {
    setIsLoadingMore(false)
  }
}
return { return {
// State // State
feeds, feeds,
sources, sources,
filter, filter,
selectedFeedId, selectedFeedId,
isLoadingMore,
// Computed // Computed
getFilteredFeeds, getFilteredFeeds,
getAllEpisodesChronological, getAllEpisodesChronological,
getFeed, getFeed,
getSelectedFeed, getSelectedFeed,
hasMoreEpisodes,
// Actions // Actions
setFilter, setFilter,
@@ -305,6 +381,7 @@ export function createFeedStore() {
togglePinned, togglePinned,
refreshFeed, refreshFeed,
refreshAllFeeds, refreshAllFeeds,
loadMoreEpisodes,
addSource, addSource,
removeSource, removeSource,
toggleSource, toggleSource,

149
src/utils/audio-waveform.ts Normal file
View File

@@ -0,0 +1,149 @@
/**
* Audio waveform analysis for PodTUI
*
* Extracts amplitude data from audio files using ffmpeg (when available)
* or generates procedural waveform data as a fallback. Results are cached
* in-memory keyed by audio URL.
*/
/** Default number of amplitude data points to generate */
const DEFAULT_RESOLUTION = 128

/** In-memory cache: `${audioUrl}:${resolution}` -> normalised amplitudes (0-1) */
const waveformCache = new Map<string, number[]>()
/**
 * Try to extract real waveform data from an audio URL using ffmpeg.
 *
 * Decodes up to the first 5 minutes of audio to mono 8 kHz signed 16-bit PCM
 * and downsamples it into `resolution` peak-amplitude buckets normalised to
 * 0-1. Returns null if ffmpeg is not available or the extraction fails.
 * NOTE: relies on the Bun runtime (`Bun.which` / `Bun.spawn`).
 */
async function extractWithFfmpeg(audioUrl: string, resolution: number): Promise<number[] | null> {
  try {
    // Bail out early when ffmpeg isn't on PATH.
    if (!Bun.which("ffmpeg")) return null
    // Use ffmpeg to output raw PCM samples, then downsample to `resolution` points.
    // -t 300: read at most 5 minutes (enough data to fill the waveform)
    const proc = Bun.spawn(
      [
        "ffmpeg",
        "-i", audioUrl,
        "-t", "300",
        "-ac", "1", // mono
        "-ar", "8000", // low sample rate to keep data small
        "-f", "s16le", // raw signed 16-bit PCM
        "-v", "quiet",
        "-", // write to stdout
      ],
      { stdout: "pipe", stderr: "ignore" },
    )
    // Drain stdout fully before awaiting exit so the pipe can't block ffmpeg.
    const output = await new Response(proc.stdout).arrayBuffer()
    await proc.exited
    if (output.byteLength === 0) return null
    // An odd byteLength makes this constructor throw; the catch below
    // converts that (and any other failure) into a null result.
    const samples = new Int16Array(output)
    if (samples.length === 0) return null
    // Downsample to `resolution` buckets by taking the max absolute amplitude
    // in each bucket. With floor() sizing, up to bucketSize-1 trailing
    // samples may be ignored.
    const bucketSize = Math.max(1, Math.floor(samples.length / resolution))
    const data: number[] = []
    for (let i = 0; i < resolution; i++) {
      const start = i * bucketSize
      const end = Math.min(start + bucketSize, samples.length)
      let maxAbs = 0
      for (let j = start; j < end; j++) {
        const abs = Math.abs(samples[j])
        if (abs > maxAbs) maxAbs = abs
      }
      // Normalise to 0-1 (32768 = magnitude range of signed 16-bit PCM)
      data.push(Number((maxAbs / 32768).toFixed(3)))
    }
    return data
  } catch {
    // Spawn failure, decode error, malformed output, etc. — treat every
    // failure as "no real data available" so callers can fall back.
    return null
  }
}
/**
 * Generate a procedural (fake) waveform that looks plausible.
 *
 * Combines sine waves at several frequencies to simulate varying audio
 * energy, plus a small deterministic jitter term. Fully deterministic:
 * the same (resolution, seed) pair always yields the same data, so a given
 * URL renders the same waveform across sessions (the seed is a hash of the
 * URL) — the previous Math.random() term broke that guarantee.
 *
 * @param resolution number of data points to produce
 * @param seed       offset mixed into every point (derived from the URL hash)
 * @returns amplitudes in [0, 1], rounded to 3 decimal places
 */
function generateProcedural(resolution: number, seed: number): number[] {
  const data: number[] = []
  for (let i = 0; i < resolution; i++) {
    const t = i + seed
    // Deterministic pseudo-random jitter in [0, 0.1) derived from t —
    // replaces Math.random() so output is stable for a given seed.
    const jitter = (Math.abs(Math.sin(t * 12.9898) * 43758.5453) % 1) * 0.1
    const value =
      0.15 +
      Math.abs(Math.sin(t / 3.7)) * 0.35 +
      Math.abs(Math.sin(t / 7.3)) * 0.25 +
      Math.abs(Math.sin(t / 13.1)) * 0.15 +
      jitter
    data.push(Number(Math.min(1, value).toFixed(3)))
  }
  return data
}
/**
 * Deterministic hash of a string (classic x31 rolling hash over UTF-16
 * code units, kept in 32-bit range). Seeds procedural waveform generation
 * so the same URL always maps to the same waveform. Always non-negative.
 */
function hashString(s: string): number {
  let acc = 0
  let idx = 0
  while (idx < s.length) {
    acc = (acc * 31 + s.charCodeAt(idx)) | 0
    idx += 1
  }
  return Math.abs(acc)
}
/**
 * Get waveform data for an audio URL (async).
 *
 * Serves from the in-memory cache when possible; otherwise attempts real
 * extraction via ffmpeg and falls back to deterministic procedural data.
 * The result is cached either way, keyed by URL and resolution.
 */
export async function getWaveformData(
  audioUrl: string,
  resolution: number = DEFAULT_RESOLUTION,
): Promise<number[]> {
  const key = `${audioUrl}:${resolution}`
  const hit = waveformCache.get(key)
  if (hit) return hit
  // Prefer real extraction; null means ffmpeg was unavailable or failed.
  const extracted = await extractWithFfmpeg(audioUrl, resolution)
  const result = extracted ?? generateProcedural(resolution, hashString(audioUrl))
  waveformCache.set(key, result)
  return result
}
/**
 * Synchronous fallback: return a waveform immediately, from cache or
 * procedural generation. Use when data is needed without waiting for
 * async ffmpeg extraction; the procedural result is cached so later
 * sync calls for the same URL/resolution are stable.
 */
export function getWaveformDataSync(
  audioUrl: string,
  resolution: number = DEFAULT_RESOLUTION,
): number[] {
  const key = `${audioUrl}:${resolution}`
  let result = waveformCache.get(key)
  if (!result) {
    result = generateProcedural(resolution, hashString(audioUrl))
    waveformCache.set(key, result)
  }
  return result
}
/** Clear the waveform cache (for memory management) */
export function clearWaveformCache(): void {
waveformCache.clear()
}

View File

@@ -45,11 +45,16 @@ Each feature has its own directory with:
**Tasks:** 5 tasks **Tasks:** 5 tasks
**Directory:** `tasks/config-persistence/` **Directory:** `tasks/config-persistence/`
### 8. Audio Playback Fix
**Feature:** Fix non-functional volume/speed controls and add multimedia key support
**Tasks:** 5 tasks
**Directory:** `tasks/audio-playback-fix/`
## Task Summary ## Task Summary
**Total Features:** 7 **Total Features:** 8
**Total Tasks:** 27 **Total Tasks:** 32
**Critical Path:** Feature 7 (Config Persistence) - 5 tasks **Critical Path:** Feature 7 (Config Persistence) - 5 tasks, Feature 8 (Audio Playback Fix) - 5 tasks
## Task Dependencies ## Task Dependencies
@@ -82,11 +87,17 @@ Each feature has its own directory with:
- 21 → 22 - 21 → 22
### Feature 7: Config Persistence to XDG_CONFIG_HOME ### Feature 7: Config Persistence to XDG_CONFIG_HOME
- 23 24 - 23 -> 24
- 23 25 - 23 -> 25
- 24 26 - 24 -> 26
- 25 26 - 25 -> 26
- 26 27 - 26 -> 27
### Feature 8: Audio Playback Fix
- 28 -> 29
- 29 -> 30
- 30 -> 31
- 31 -> 32
## Priority Overview ## Priority Overview
@@ -95,19 +106,23 @@ Each feature has its own directory with:
- 24: Refactor feeds persistence to JSON file - 24: Refactor feeds persistence to JSON file
- 25: Refactor theme persistence to JSON file - 25: Refactor theme persistence to JSON file
- 26: Add config file validation and migration - 26: Add config file validation and migration
- 28: Fix volume and speed controls in audio backends
- 32: Test multimedia controls across platforms
**P2 (High):** **P2 (High):**
- All other tasks (01-22, 27) - All other tasks (01-22, 27, 29-31)
**P3 (Medium):** **P3 (Medium):**
- 09: Optimize waveform rendering performance - 09: Optimize waveform rendering performance
- 13: Add loading indicator for pagination - 13: Add loading indicator for pagination
- 19: Create download queue management - 19: Create download queue management
- 30: Add multimedia key detection and handling
- 31: Implement platform-specific media stream integration
## Next Steps ## Next Steps
1. Review all task files for accuracy 1. Review all task files for accuracy
2. Confirm task dependencies 2. Confirm task dependencies
3. Start with P1 tasks (Feature 7) 3. Start with P1 tasks (Feature 7 or Feature 8)
4. Follow dependency order within each feature 4. Follow dependency order within each feature
5. Mark tasks complete as they're finished 5. Mark tasks complete as they're finished

View File

@@ -0,0 +1,65 @@
# 01. Fix volume and speed controls in audio backends
meta:
id: audio-playback-fix-01
feature: audio-playback-fix
priority: P1
depends_on: []
tags: [implementation, backend-fix, testing-required]
objective:
- Fix non-functional volume and speed controls in audio player backends (mpv, ffplay, afplay)
- Implement proper error handling and validation for volume/speed commands
- Ensure commands are successfully received and applied by the audio player
deliverables:
- Fixed `MpvBackend.setVolume()` and `MpvBackend.setSpeed()` methods with proper IPC command validation
- Enhanced `AfplayBackend.setVolume()` and `AfplayBackend.setSpeed()` for runtime changes
- Added command response validation in all backends
- Unit tests for volume and speed control methods
steps:
- Step 1: Analyze current IPC implementation in MpvBackend (lines 206-223)
- Step 2: Implement proper response validation for setVolume and setSpeed IPC commands
- Step 3: Fix afplay backend to apply volume/speed changes at runtime (currently only on next play)
- Step 4: Add error handling and logging for failed volume/speed commands
- Step 5: Add unit tests in `src/utils/audio-player.test.ts` for volume/speed methods
- Step 6: Verify volume changes apply immediately and persist across playback
- Step 7: Verify speed changes apply immediately and persist across playback
tests:
- Unit:
- Test MpvBackend.setVolume() sends correct IPC command and receives valid response
- Test MpvBackend.setSpeed() sends correct IPC command and receives valid response
- Test AfplayBackend.setVolume() applies volume immediately
- Test AfplayBackend.setSpeed() applies speed immediately
- Test volume clamp values (0-1 range)
- Test speed clamp values (0.25-3 range)
- Integration:
- Test volume control through Player component UI
- Test speed control through Player component UI
- Test volume/speed changes persist across pause/resume cycles
- Test volume/speed changes persist across track changes
acceptance_criteria:
- Volume slider in Player component changes volume in real-time
- Speed controls in Player component change playback speed in real-time
- Volume changes are visible in system audio output
- Speed changes are immediately reflected in playback rate
- No errors logged when changing volume or speed
- Volume/speed settings persist when restarting the app
validation:
- Run `bun test src/utils/audio-player.test.ts` to verify unit tests pass
- Test volume control using Up/Down arrow keys in Player
- Test speed control using 'S' key in Player
- Verify volume level is visible in PlaybackControls component
- Verify speed level is visible in PlaybackControls component
- Check console logs for any IPC errors
notes:
- mpv backend uses JSON IPC over Unix socket - need to validate response format
- afplay backend needs to restart process for volume/speed changes (current behavior)
- ffplay backend doesn't support runtime volume/speed changes (document limitation)
- Volume and speed state is stored in backend class properties and should be updated on successful commands
- Reference: src/utils/audio-player.ts lines 206-223 (mpv send method), lines 789-791 (afplay setVolume), lines 793-795 (afplay setSpeed)

View File

@@ -0,0 +1,61 @@
# 02. Add multimedia key detection and handling
meta:
id: audio-playback-fix-02
feature: audio-playback-fix
priority: P2
  depends_on: [audio-playback-fix-01]
tags: [implementation, keyboard, multimedia]
objective:
- Implement detection and handling of multimedia keys (Play/Pause, Next/Previous, Volume Up/Down)
- Create reusable multimedia key handler hook
- Map multimedia keys to audio playback actions
deliverables:
- New `useMultimediaKeys()` hook in `src/hooks/useMultimediaKeys.ts`
- Integration with existing audio hook to handle multimedia key events
- Documentation of supported multimedia keys and their mappings
steps:
- Step 1: Research @opentui/solid keyboard event types for multimedia key detection
- Step 2: Create `useMultimediaKeys()` hook with event listener for multimedia keys
- Step 3: Define multimedia key mappings (Play/Pause, Next, Previous, Volume Up, Volume Down)
- Step 4: Integrate hook with audio hook to trigger playback actions
- Step 5: Add keyboard event filtering to prevent conflicts with other shortcuts
- Step 6: Test multimedia key detection across different platforms
- Step 7: Add help text to Player component showing multimedia key bindings
tests:
- Unit:
- Test multimedia key events are detected correctly
- Test key mapping functions return correct audio actions
- Test hook cleanup removes event listeners
- Integration:
- Test Play/Pause key toggles playback
- Test Next/Previous keys skip tracks (placeholder for future)
- Test Volume Up/Down keys adjust volume
- Test keys don't trigger when input is focused
- Test keys don't trigger when player is not focused
acceptance_criteria:
- Multimedia keys are detected and logged when pressed
- Play/Pause key toggles audio playback
- Volume Up/Down keys adjust volume level
- Keys work when Player component is focused
- Keys don't interfere with other keyboard shortcuts
- Help text displays multimedia key bindings
validation:
- Press multimedia keys while Player is focused and verify playback responds
- Check console logs for detected multimedia key events
- Verify Up/Down keys adjust volume display in Player component
- Verify Space key still works for play/pause
- Test in different terminal emulators (iTerm2, Terminal.app, etc.)
notes:
- Multimedia key detection may vary by platform and terminal emulator
- Common multimedia keys: Space (Play/Pause), ArrowUp (Volume Up), ArrowDown (Volume Down)
- Some terminals don't pass multimedia keys to application
- May need to use platform-specific APIs or terminal emulator-specific key codes
- Reference: @opentui/solid keyboard event types and existing useKeyboard hook patterns

View File

@@ -0,0 +1,66 @@
# 03. Implement platform-specific media stream integration
meta:
id: audio-playback-fix-03
feature: audio-playback-fix
priority: P2
  depends_on: [audio-playback-fix-02]
tags: [implementation, platform-integration, media-apis]
objective:
- Register audio player with platform-specific media frameworks
- Enable OS media controls (notification center, lock screen, multimedia keys)
- Support macOS AVFoundation, Windows Media Foundation, and Linux PulseAudio/GStreamer
deliverables:
- Platform-specific media registration module in `src/utils/media-registry.ts`
- Integration with audio hook to register/unregister media streams
- Platform detection and conditional registration logic
- Documentation of supported platforms and media APIs
steps:
- Step 1: Research platform-specific media API integration options
- Step 2: Create `MediaRegistry` class with platform detection
- Step 3: Implement macOS AVFoundation integration (AVPlayer + AVAudioSession)
- Step 4: Implement Windows Media Foundation integration (MediaSession + PlaybackInfo)
- Step 5: Implement Linux PulseAudio/GStreamer integration (Mpris or libpulse)
- Step 6: Integrate with audio hook to register media stream on play
- Step 7: Unregister media stream on stop or dispose
- Step 8: Handle platform-specific limitations and fallbacks
- Step 9: Test media registration across platforms
tests:
- Unit:
- Test platform detection returns correct platform name
- Test MediaRegistry.register() calls platform-specific APIs
- Test MediaRegistry.unregister() cleans up platform resources
- Integration:
- Test audio player appears in macOS notification center
- Test audio player appears in Windows media controls
- Test audio player appears in Linux media player notifications
- Test media controls update with playback position
- Test multimedia keys control playback through media APIs
acceptance_criteria:
- Audio player appears in platform media controls (notification center, lock screen)
- Media controls update with current track info and playback position
- Multimedia keys work through media APIs (not just terminal)
- Media registration works on macOS, Windows, and Linux
- Media unregistration properly cleans up resources
- No memory leaks from media stream registration
validation:
- On macOS: Check notification center for audio player notification
- On Windows: Check media controls in taskbar/notification area
- On Linux: Check media player notifications in desktop environment
- Test multimedia keys work with system media player (not just terminal)
- Monitor memory usage for leaks
notes:
- Platform-specific media APIs are complex and may have limitations
- macOS AVFoundation: Use AVPlayer with AVAudioSession for media registration
- Windows Media Foundation: Use MediaSession API and PlaybackInfo for media controls
- Linux: Use Mpris (Media Player Remote Interface Specification) or libpulse
- May need additional platform-specific dependencies or native code
- Fallback to terminal multimedia key handling if platform APIs unavailable
- Reference: Platform-specific media API documentation and examples

View File

@@ -0,0 +1,63 @@
# 04. Add media key listeners to audio hook
meta:
id: audio-playback-fix-04
feature: audio-playback-fix
priority: P2
  depends_on: [audio-playback-fix-03]
tags: [implementation, integration, event-handling]
objective:
- Integrate multimedia key handling with existing audio hook
- Route multimedia key events to appropriate audio control actions
- Ensure proper cleanup of event listeners
deliverables:
- Updated `useAudio()` hook with multimedia key event handling
- Media key event listener registration in audio hook
- Integration with multimedia key detection hook
- Proper cleanup of event listeners on component unmount
steps:
- Step 1: Import multimedia key detection hook into audio hook
- Step 2: Register multimedia key event listener in audio hook
- Step 3: Map multimedia key events to audio control actions (play/pause, seek, volume)
- Step 4: Add event listener cleanup on hook dispose
- Step 5: Test event listener cleanup with multiple component instances
- Step 6: Add error handling for failed multimedia key events
- Step 7: Test multimedia key events trigger correct audio actions
tests:
- Unit:
- Test multimedia key events are captured in audio hook
- Test events are mapped to correct audio control actions
- Test event listeners are properly cleaned up
- Test multiple audio hook instances don't conflict
- Integration:
- Test multimedia keys control playback from any component
- Test multimedia keys work when player is not focused
- Test multimedia keys don't interfere with other keyboard shortcuts
- Test event listeners are removed when audio hook is disposed
acceptance_criteria:
- Multimedia key events are captured by audio hook
- Multimedia keys trigger correct audio control actions
- Event listeners are properly cleaned up on unmount
- No duplicate event listeners when components re-render
- No memory leaks from event listeners
- Error handling prevents crashes from invalid events
validation:
- Use multimedia keys and verify audio responds correctly
- Unmount and remount audio hook to test cleanup
- Check for memory leaks with browser dev tools or system monitoring
- Verify event listener count is correct after cleanup
- Test with multiple Player components to ensure no conflicts
notes:
- Audio hook is a singleton, so event listeners should be registered once
- Multimedia key detection hook should be reused to avoid duplicate listeners
- Event listener cleanup should use onCleanup from solid-js
- Reference: src/hooks/useAudio.ts for event listener patterns
- Multimedia keys may only work when terminal is focused (platform limitation)
- Consider adding platform-specific key codes for better compatibility

View File

@@ -0,0 +1,78 @@
# 05. Test multimedia controls across platforms
meta:
id: audio-playback-fix-05
feature: audio-playback-fix
priority: P1
  depends_on: [audio-playback-fix-01, audio-playback-fix-02, audio-playback-fix-03, audio-playback-fix-04]
tags: [testing, integration, cross-platform]
objective:
- Comprehensive testing of volume/speed controls and multimedia key support
- Verify platform-specific media integration works correctly
- Validate all controls across different audio backends
deliverables:
- Test suite for volume/speed controls in `src/utils/audio-player.test.ts`
- Integration tests for multimedia key handling in `src/hooks/useMultimediaKeys.test.ts`
- Platform-specific integration tests in `src/utils/media-registry.test.ts`
- Test coverage report showing all features tested
steps:
- Step 1: Run existing unit tests for audio player backends
- Step 2: Add volume control tests (setVolume, volume clamp, persistence)
- Step 3: Add speed control tests (setSpeed, speed clamp, persistence)
- Step 4: Create integration test for multimedia key handling
- Step 5: Test volume/speed controls with Player component UI
- Step 6: Test multimedia keys with Player component UI
- Step 7: Test platform-specific media integration on each platform
- Step 8: Test all controls across mpv, ffplay, and afplay backends
- Step 9: Document any platform-specific limitations or workarounds
tests:
- Unit:
- Test volume control methods in all backends
- Test speed control methods in all backends
- Test volume clamp logic (0-1 range)
- Test speed clamp logic (0.25-3 range)
- Test multimedia key detection
- Test event listener cleanup
- Integration:
- Test volume control via Player component UI
- Test speed control via Player component UI
- Test multimedia keys via keyboard
- Test volume/speed persistence across pause/resume
- Test volume/speed persistence across track changes
- Cross-platform:
- Test volume/speed controls on macOS
- Test volume/speed controls on Linux
- Test volume/speed controls on Windows
- Test multimedia keys on each platform
- Test media registration on each platform
acceptance_criteria:
- All unit tests pass with >90% code coverage
- All integration tests pass
- Volume controls work correctly on all platforms
- Speed controls work correctly on all platforms
- Multimedia keys work on all platforms
- Media controls appear on all supported platforms
- All audio backends (mpv, ffplay, afplay) work correctly
- No regressions in existing audio functionality
validation:
- Run full test suite: `bun test`
- Check test coverage: `bun test --coverage`
- Manually test volume controls on each platform
- Manually test speed controls on each platform
- Manually test multimedia keys on each platform
- Verify media controls appear on each platform
- Check for any console errors or warnings
notes:
- Test suite should cover all audio backend implementations
- Integration tests should verify UI controls work correctly
- Platform-specific tests should run on actual platform if possible
- Consider using test doubles for platform-specific APIs
- Document any platform-specific issues or limitations found
- Reference: Test patterns from existing test files in src/utils/

View File

@@ -0,0 +1,26 @@
# Audio Playback Fix
Objective: Fix volume and speed controls and add multimedia key support with platform media stream integration
Status legend: [ ] todo, [~] in-progress, [x] done
Tasks
- [ ] 01 — Fix volume and speed controls in audio backends → `01-fix-volume-speed-controls.md`
- [ ] 02 — Add multimedia key detection and handling → `02-add-multimedia-key-detection.md`
- [ ] 03 — Implement platform-specific media stream integration → `03-implement-platform-media-integration.md`
- [ ] 04 — Add media key listeners to audio hook → `04-add-media-key-listeners.md`
- [ ] 05 — Test multimedia controls across platforms → `05-test-multimedia-controls.md`
Dependencies
- 02 depends on 01
- 03 depends on 02
- 04 depends on 03
- 05 depends on 04
Exit criteria
- Volume controls change playback volume in real-time
- Speed controls change playback speed in real-time
- Multimedia keys (Space, Arrow keys, Volume keys, Media keys) control playback
- Audio player appears in system media controls
- System multimedia keys trigger appropriate playback actions
- All controls work across mpv, ffplay, and afplay backends