current org
This commit is contained in:
34
tasks/FRE-28.yaml
Normal file
34
tasks/FRE-28.yaml
Normal file
@@ -0,0 +1,34 @@
---
date: 2026-03-08
day_of_week: Sunday
task_id: FRE-28
title: Optimize Generation Speed for Long Books
status: todo
company_id: FrenoCorp
objective: Reduce generation time for books with many segments
context: |
  - Current generation is sequential and slow
  - Can optimize model inference and post-processing
  - Batch processing improvements needed
issue_type: enhancement
priority: medium
assignee: Atlas
parent_task: FRE-32
goal_id: MVP_Pipeline_Working
blocking_tasks: []
expected_outcome: |
  - Generation time under 2x real-time for 1.7B model
  - Efficient memory usage during long runs
  - Configurable quality/speed tradeoffs
acceptance_criteria:
  - Benchmark shows <2x real-time generation
  - Memory stays stable during long books
  - Speed/quality options available

notes:
  - Profile generation pipeline to find bottlenecks
  - Consider model quantization for speed

links:
  tts_model: /home/mike/code/AudiobookPipeline/src/generation/tts_model.py
---
Reference in New Issue
Block a user