feat: v3.0 - canvas editor, JSON-only, no Excel, new UI
Some checks failed
Build & Push Docker / build (push) Has been cancelled

- Remove all Excel code (import, export, template, pandas, openpyxl)
- New canvas-based schedule editor with drag & drop (interact.js)
- Modern 3-panel UI: sidebar, canvas, documentation tab
- New data model: Block with id/date/start/end, ProgramType with id/name/color
- Clean API: GET /api/health, POST /api/validate, GET /api/sample, POST /api/generate-pdf
- Rewritten PDF generator using ScenarioDocument directly (no DataFrame)
- Professional PDF output: dark header, colored blocks, merged cells, legend, footer
- Sample JSON: "Zimní výjezd oddílu" with 11 blocks, 3 program types
- 30 tests passing (API, core models, PDF generation)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-20 17:02:51 +01:00
parent e2bdadd0ce
commit 25fd578543
27 changed files with 2004 additions and 3016 deletions

View File

@@ -1,28 +1,10 @@
"""Core business logic for Scenar Creator."""
"""Core business logic for Scenar Creator v3."""
from .validator import (
ScenarsError,
ValidationError,
TemplateError,
validate_inputs,
validate_excel_template,
normalize_time,
)
from .timetable import create_timetable, calculate_row_height, calculate_column_width
from .excel_reader import read_excel, get_program_types, parse_inline_schedule, parse_inline_types
from .validator import ScenarsError, ValidationError
from .pdf_generator import generate_pdf
__all__ = [
"ScenarsError",
"ValidationError",
"TemplateError",
"validate_inputs",
"validate_excel_template",
"normalize_time",
"create_timetable",
"calculate_row_height",
"calculate_column_width",
"read_excel",
"get_program_types",
"parse_inline_schedule",
"parse_inline_types",
"generate_pdf",
]

View File

@@ -1,274 +0,0 @@
"""
Excel reading and form parsing logic for Scenar Creator.
Extracted from scenar/core.py — read_excel, get_program_types, parse_inline_schedule, parse_inline_types.
"""
import pandas as pd
from io import BytesIO
import logging
from .validator import (
validate_excel_template,
normalize_time,
ValidationError,
TemplateError,
DEFAULT_COLOR,
)
logger = logging.getLogger(__name__)
def read_excel(file_content: bytes, show_debug: bool = False) -> tuple:
    """
    Parse an Excel file and return (valid_data, error_rows).

    Handles different column naming conventions:
    - Old format: Datum, Zacatek, Konec, Program, Typ, Garant, Poznamka
    - New template: Datum, Zacatek bloku, Konec bloku, Nazev bloku, Typ bloku, Garant, Poznamka

    Args:
        file_content: Raw bytes of the uploaded .xlsx file.
        show_debug: When True, log intermediate DataFrames at DEBUG level.

    Returns:
        tuple: (pandas.DataFrame with valid rows, list of dicts with error details)

    Raises:
        TemplateError: if the file cannot be read or required columns are missing.
    """
    try:
        excel_data = pd.read_excel(BytesIO(file_content), skiprows=0)
    except Exception as e:
        raise TemplateError(f"Failed to read Excel file: {str(e)}")

    # Map column names from various possible names to our standard names.
    column_mapping = {
        'Zacatek bloku': 'Zacatek',
        'Konec bloku': 'Konec',
        'Nazev bloku': 'Program',
        'Typ bloku': 'Typ',
    }
    excel_data = excel_data.rename(columns=column_mapping)

    # Validate template (raises TemplateError on missing columns).
    validate_excel_template(excel_data)

    if show_debug:
        logger.debug(f"Raw data:\n{excel_data.head()}")

    error_rows = []
    valid_data = []
    for index, row in excel_data.iterrows():
        try:
            # errors='coerce' yields NaT for bad dates; NaT.date() raises,
            # which lands the row in error_rows via the except below.
            datum = pd.to_datetime(row["Datum"], errors='coerce').date()
            zacatek = normalize_time(str(row["Zacatek"]))
            konec = normalize_time(str(row["Konec"]))
            if pd.isna(datum) or zacatek is None or konec is None:
                raise ValueError("Invalid date or time format")
            valid_data.append({
                "index": index,
                "Datum": datum,
                "Zacatek": zacatek,
                "Konec": konec,
                "Program": row["Program"],
                "Typ": row["Typ"],
                "Garant": row["Garant"],
                "Poznamka": row["Poznamka"],
                "row_data": row
            })
        except Exception as e:
            error_rows.append({"index": index, "row": row, "error": str(e)})

    valid_data = pd.DataFrame(valid_data)

    # Early return if no valid rows
    if valid_data.empty:
        logger.warning("No valid rows after parsing")
        return valid_data.drop(columns='index', errors='ignore'), error_rows

    if show_debug:
        logger.debug(f"Cleaned data:\n{valid_data.head()}")
        logger.debug(f"Error rows: {error_rows}")

    # Detect overlapping blocks within each day (compared in start order).
    overlap_errors = []
    for date, group in valid_data.groupby('Datum'):
        sorted_group = group.sort_values(by='Zacatek')
        previous_end_time = None
        for _, r in sorted_group.iterrows():
            if previous_end_time and r['Zacatek'] < previous_end_time:
                overlap_errors.append({
                    "index": r["index"],
                    "Datum": r["Datum"],
                    "Zacatek": r["Zacatek"],
                    "Konec": r["Konec"],
                    "Program": r["Program"],
                    "Typ": r["Typ"],
                    "Garant": r["Garant"],
                    "Poznamka": r["Poznamka"],
                    "Error": f"Overlapping time block with previous block ending at {previous_end_time}",
                    "row_data": r["row_data"]
                })
            previous_end_time = r['Konec']

    if overlap_errors:
        if show_debug:
            logger.debug(f"Overlap errors: {overlap_errors}")
        # BUG FIX: filter on the original Excel row index stored in the
        # 'index' COLUMN, not on the DataFrame's positional index — the two
        # diverge as soon as any earlier row was rejected during parsing,
        # which previously removed the wrong rows.
        valid_data = valid_data[~valid_data['index'].isin([e['index'] for e in overlap_errors])]
        error_rows.extend(overlap_errors)

    return valid_data.drop(columns='index'), error_rows
def get_program_types(form_data: dict) -> tuple:
    """
    Extract program types from form data.

    Form fields: type_code_{i}, desc_{i}, color_{i}

    Returns:
        tuple: (program_descriptions dict, program_colors dict)
    """
    descriptions = {}
    colors_out = {}

    def _read(source, field, fallback=''):
        # Works with plain dicts as well as cgi.FieldStorage objects.
        if hasattr(source, 'getvalue'):
            return source.getvalue(field, fallback)
        return source.get(field, fallback)

    for field_name in list(form_data.keys()):
        if not field_name.startswith('type_code_'):
            continue
        suffix = field_name.split('_')[-1]
        code = (_read(form_data, f'type_code_{suffix}', '') or '').strip()
        if not code:
            continue  # blank code means the row was left empty
        desc = (_read(form_data, f'desc_{suffix}', '') or '').strip()
        raw_color = _read(form_data, f'color_{suffix}', DEFAULT_COLOR) or DEFAULT_COLOR
        descriptions[code] = desc
        # Prefix an opaque alpha channel: downstream expects AARRGGBB hex.
        colors_out[code] = 'FF' + str(raw_color).lstrip('#')
    return descriptions, colors_out
def parse_inline_schedule(form_data) -> pd.DataFrame:
    """
    Parse inline schedule form data into a DataFrame.

    Form fields:
        datum_{i}, zacatek_{i}, konec_{i}, program_{i}, typ_{i}, garant_{i}, poznamka_{i}

    Args:
        form_data: dict or cgi.FieldStorage with form data

    Returns:
        DataFrame with parsed schedule data

    Raises:
        ValidationError: if required fields are missing or invalid
    """
    def _field(source, key, fallback=''):
        # cgi.FieldStorage exposes getvalue(); plain dicts use get().
        if hasattr(source, 'getvalue'):
            return source.getvalue(key, fallback).strip()
        return source.get(key, fallback).strip()

    # Collect every row index present in the form.
    indices = {key.split('_')[-1] for key in form_data.keys() if key.startswith('datum_')}

    rows = []
    for idx in sorted(indices, key=int):
        datum_str = _field(form_data, f'datum_{idx}')
        zacatek_str = _field(form_data, f'zacatek_{idx}')
        konec_str = _field(form_data, f'konec_{idx}')
        program = _field(form_data, f'program_{idx}')
        typ = _field(form_data, f'typ_{idx}')
        garant = _field(form_data, f'garant_{idx}')
        poznamka = _field(form_data, f'poznamka_{idx}')

        required = [datum_str, zacatek_str, konec_str, program, typ]
        if not any(required):
            continue  # entirely empty row — silently skip
        if not all(required):
            raise ValidationError(
                f"Řádek {int(idx)+1}: Všechna povinná pole (Datum, Začátek, Konec, Program, Typ) musí být vyplněna"
            )
        try:
            datum = pd.to_datetime(datum_str).date()
        except Exception:
            raise ValidationError(f"Řádek {int(idx)+1}: Neplatné datum")
        zacatek = normalize_time(zacatek_str)
        konec = normalize_time(konec_str)
        if zacatek is None or konec is None:
            raise ValidationError(f"Řádek {int(idx)+1}: Neplatný čas (použijte HH:MM nebo HH:MM:SS)")
        rows.append({
            'Datum': datum,
            'Zacatek': zacatek,
            'Konec': konec,
            'Program': program,
            'Typ': typ,
            'Garant': garant or None,
            'Poznamka': poznamka or None,
        })

    if not rows:
        raise ValidationError("Žádné platné řádky ve formuláři")
    return pd.DataFrame(rows)
def parse_inline_types(form_data) -> tuple:
    """
    Parse inline type definitions from form data.

    Form fields: type_name_{i}, type_desc_{i}, type_color_{i}

    Args:
        form_data: dict or cgi.FieldStorage with form data

    Returns:
        tuple: (program_descriptions dict, program_colors dict)
    """
    def _field(source, key, fallback=''):
        # cgi.FieldStorage exposes getvalue(); plain dicts use get().
        if hasattr(source, 'getvalue'):
            return source.getvalue(key, fallback).strip()
        return source.get(key, fallback).strip()

    # Collect every type index present in the form.
    indices = {key.split('_')[-1] for key in form_data.keys() if key.startswith('type_name_')}

    descriptions = {}
    colors = {}
    for idx in sorted(indices, key=int):
        type_name = _field(form_data, f'type_name_{idx}')
        if not type_name:
            continue  # blank name means the row was left empty
        descriptions[type_name] = _field(form_data, f'type_desc_{idx}')
        type_color = _field(form_data, f'type_color_{idx}', DEFAULT_COLOR)
        # Prefix an opaque alpha channel: downstream expects AARRGGBB hex.
        colors[type_name] = 'FF' + type_color.lstrip('#')
    return descriptions, colors

View File

@@ -1,17 +1,17 @@
"""
PDF generation for Scenar Creator using ReportLab.
PDF generation for Scenar Creator v3 using ReportLab.
Generates A4 landscape timetable PDF with colored blocks and legend.
"""
import pandas as pd
from io import BytesIO
from datetime import datetime
from collections import defaultdict
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, landscape
from reportlab.lib.units import mm
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_CENTER, TA_LEFT
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT
import logging
from .validator import ScenarsError
@@ -20,175 +20,270 @@ logger = logging.getLogger(__name__)
def hex_to_reportlab_color(hex_color: str) -> colors.Color:
    """Convert a hex color (#RRGGBB or AARRGGBB) to a ReportLab Color.

    Malformed values fall back to white rather than raising.
    Note: this removes a diff-merge artifact — the original text carried a
    duplicated docstring line and a duplicated alpha-strip `if` block.
    """
    h = hex_color.lstrip('#')
    if len(h) == 8:  # AARRGGBB — drop the leading alpha channel
        h = h[2:]
    if len(h) == 6:
        r, g, b = int(h[0:2], 16), int(h[2:4], 16), int(h[4:6], 16)
        return colors.Color(r / 255.0, g / 255.0, b / 255.0)
    return colors.white
def generate_pdf(data: pd.DataFrame, title: str, detail: str,
program_descriptions: dict, program_colors: dict) -> bytes:
def is_light_color(hex_color: str) -> bool:
    """Return True when the color is light enough to require dark text."""
    h = hex_color.lstrip('#')
    if len(h) == 8:
        h = h[2:]  # drop the AA prefix of AARRGGBB
    if len(h) != 6:
        return False  # unparseable value — treat as dark
    r, g, b = (int(h[i:i + 2], 16) for i in (0, 2, 4))
    # Rec. 601 perceived-luminance weights, normalized to [0, 1].
    luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
    return luminance > 0.6
def time_to_minutes(time_str: str) -> int:
    """Convert an "HH:MM" string to minutes since midnight (seconds, if present, are ignored)."""
    hours, minutes = time_str.split(":")[:2]
    return int(hours) * 60 + int(minutes)
def generate_pdf(doc) -> bytes:
"""
Generate a PDF timetable.
Generate a PDF timetable from a ScenarioDocument.
Args:
data: DataFrame with validated schedule data
title: Event title
detail: Event detail/description
program_descriptions: {type: description}
program_colors: {type: color_hex in AARRGGBB format}
doc: ScenarioDocument instance
Returns:
bytes: PDF file content
Raises:
ScenarsError: if data is invalid
"""
if data.empty:
raise ScenarsError("Data is empty after validation")
if not doc.blocks:
raise ScenarsError("No blocks provided")
missing_types = [typ for typ in data["Typ"].unique() if typ not in program_colors]
if missing_types:
raise ScenarsError(
f"Missing type definitions: {', '.join(missing_types)}. "
"Please define all program types."
)
type_map = {pt.id: pt for pt in doc.program_types}
for block in doc.blocks:
if block.type_id not in type_map:
raise ScenarsError(
f"Missing type definition: '{block.type_id}'. "
"Please define all program types."
)
buffer = BytesIO()
doc = SimpleDocTemplate(
page_w, page_h = landscape(A4)
doc_pdf = SimpleDocTemplate(
buffer,
pagesize=landscape(A4),
leftMargin=10 * mm,
rightMargin=10 * mm,
topMargin=10 * mm,
bottomMargin=10 * mm,
leftMargin=12 * mm,
rightMargin=12 * mm,
topMargin=12 * mm,
bottomMargin=12 * mm,
)
styles = getSampleStyleSheet()
title_style = ParagraphStyle(
'TimetableTitle', parent=styles['Title'],
fontSize=18, alignment=TA_CENTER, spaceAfter=2 * mm
fontSize=20, alignment=TA_LEFT, spaceAfter=1 * mm,
textColor=colors.Color(0.118, 0.161, 0.231),
fontName='Helvetica-Bold'
)
detail_style = ParagraphStyle(
'TimetableDetail', parent=styles['Normal'],
fontSize=12, alignment=TA_CENTER, spaceAfter=4 * mm,
textColor=colors.gray
subtitle_style = ParagraphStyle(
'TimetableSubtitle', parent=styles['Normal'],
fontSize=12, alignment=TA_LEFT, spaceAfter=1 * mm,
textColor=colors.Color(0.4, 0.4, 0.4),
fontName='Helvetica'
)
cell_style = ParagraphStyle(
'CellStyle', parent=styles['Normal'],
fontSize=7, alignment=TA_CENTER, leading=9
info_style = ParagraphStyle(
'InfoStyle', parent=styles['Normal'],
fontSize=10, alignment=TA_LEFT, spaceAfter=4 * mm,
textColor=colors.Color(0.5, 0.5, 0.5),
fontName='Helvetica'
)
cell_style_white = ParagraphStyle(
'CellStyleWhite', parent=styles['Normal'],
fontSize=8, alignment=TA_CENTER, leading=10,
textColor=colors.white, fontName='Helvetica-Bold'
)
cell_style_dark = ParagraphStyle(
'CellStyleDark', parent=styles['Normal'],
fontSize=8, alignment=TA_CENTER, leading=10,
textColor=colors.Color(0.1, 0.1, 0.1), fontName='Helvetica-Bold'
)
time_style = ParagraphStyle(
'TimeStyle', parent=styles['Normal'],
fontSize=7, alignment=TA_RIGHT, leading=9,
textColor=colors.Color(0.5, 0.5, 0.5), fontName='Helvetica'
)
legend_style = ParagraphStyle(
'LegendStyle', parent=styles['Normal'],
fontSize=8, alignment=TA_LEFT
fontSize=9, alignment=TA_LEFT, fontName='Helvetica'
)
footer_style = ParagraphStyle(
'FooterStyle', parent=styles['Normal'],
fontSize=8, alignment=TA_CENTER,
textColor=colors.Color(0.6, 0.6, 0.6), fontName='Helvetica-Oblique'
)
elements = []
elements.append(Paragraph(title, title_style))
elements.append(Paragraph(detail, detail_style))
data = data.sort_values(by=["Datum", "Zacatek"])
# Header
elements.append(Paragraph(doc.event.title, title_style))
if doc.event.subtitle:
elements.append(Paragraph(doc.event.subtitle, subtitle_style))
start_times = data["Zacatek"]
end_times = data["Konec"]
info_parts = []
if doc.event.date:
info_parts.append(f"Datum: {doc.event.date}")
if doc.event.location:
info_parts.append(f"M\u00edsto: {doc.event.location}")
if info_parts:
elements.append(Paragraph(" | ".join(info_parts), info_style))
min_time = min(start_times)
max_time = max(end_times)
elements.append(Spacer(1, 3 * mm))
time_slots = pd.date_range(
datetime.combine(datetime.today(), min_time),
datetime.combine(datetime.today(), max_time),
freq='15min'
).time
# Group blocks by date
blocks_by_date = defaultdict(list)
for block in doc.blocks:
blocks_by_date[block.date].append(block)
# Build header row
header = ["Datum"] + [t.strftime("%H:%M") for t in time_slots]
sorted_dates = sorted(blocks_by_date.keys())
# Find global time range
all_starts = [time_to_minutes(b.start) for b in doc.blocks]
all_ends = [time_to_minutes(b.end) for b in doc.blocks]
global_start = (min(all_starts) // 30) * 30
global_end = ((max(all_ends) + 29) // 30) * 30
# Generate 30-min time slots
time_slots = []
t = global_start
while t <= global_end:
h, m = divmod(t, 60)
time_slots.append(f"{h:02d}:{m:02d}")
t += 30
# Build table: time column + one column per day
header = [""] + [d for d in sorted_dates]
table_data = [header]
cell_colors = [] # list of (row, col, color) for styling
slot_count = len(time_slots) - 1
grouped_data = data.groupby(data['Datum'])
row_idx = 1
# Build grid and track colored cells
cell_colors_list = []
for date_val, group in grouped_data:
day_name = date_val.strftime("%A")
date_str = date_val.strftime(f"%d.%m {day_name}")
for slot_idx in range(slot_count):
slot_start = time_slots[slot_idx]
slot_end = time_slots[slot_idx + 1]
row = [Paragraph(slot_start, time_style)]
for date_key in sorted_dates:
cell_content = ""
for block in blocks_by_date[date_key]:
block_start_min = time_to_minutes(block.start)
block_end_min = time_to_minutes(block.end)
slot_start_min = time_to_minutes(slot_start)
slot_end_min = time_to_minutes(slot_end)
if block_start_min <= slot_start_min and block_end_min >= slot_end_min:
pt = type_map[block.type_id]
light = is_light_color(pt.color)
cs = cell_style_dark if light else cell_style_white
if block_start_min == slot_start_min:
label = block.title
if block.responsible:
label += f"<br/><font size='6'>{block.responsible}</font>"
cell_content = Paragraph(label, cs)
else:
cell_content = ""
cell_colors_list.append((len(table_data), len(row), pt.color))
break
row.append(cell_content if cell_content else "")
row = [date_str] + [""] * len(time_slots)
table_data.append(row)
row_idx += 1
# Create a sub-row for blocks
block_row = [""] * (len(time_slots) + 1)
for _, blk in group.iterrows():
try:
start_idx = list(time_slots).index(blk["Zacatek"]) + 1
end_idx = list(time_slots).index(blk["Konec"]) + 1
except ValueError:
continue
# Column widths
avail_width = page_w - 24 * mm
time_col_width = 18 * mm
day_col_width = (avail_width - time_col_width) / max(len(sorted_dates), 1)
col_widths = [time_col_width] + [day_col_width] * len(sorted_dates)
label = blk['Program']
if pd.notna(blk.get('Garant')):
label += f"\n{blk['Garant']}"
block_row[start_idx] = Paragraph(label.replace('\n', '<br/>'), cell_style)
rl_color = hex_to_reportlab_color(program_colors[blk["Typ"]])
for ci in range(start_idx, end_idx):
cell_colors.append((row_idx, ci, rl_color))
table_data.append(block_row)
row_idx += 1
# Calculate column widths
avail_width = landscape(A4)[0] - 20 * mm
date_col_width = 30 * mm
slot_width = max(12 * mm, (avail_width - date_col_width) / max(len(time_slots), 1))
col_widths = [date_col_width] + [slot_width] * len(time_slots)
table = Table(table_data, colWidths=col_widths, repeatRows=1)
row_height = 20
table = Table(table_data, colWidths=col_widths, rowHeights=[24] + [row_height] * slot_count)
style_cmds = [
('BACKGROUND', (0, 0), (-1, 0), colors.Color(0.83, 0.83, 0.83)),
('TEXTCOLOR', (0, 0), (-1, 0), colors.black),
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
('BACKGROUND', (0, 0), (-1, 0), colors.Color(0.118, 0.161, 0.231)),
('TEXTCOLOR', (0, 0), (-1, 0), colors.white),
('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
('FONTSIZE', (0, 0), (-1, 0), 10),
('ALIGN', (0, 0), (-1, 0), 'CENTER'),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('FONTSIZE', (0, 0), (-1, 0), 7),
('FONTSIZE', (0, 1), (-1, -1), 6),
('GRID', (0, 0), (-1, -1), 0.5, colors.black),
('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.Color(0.97, 0.97, 0.97)]),
('ALIGN', (0, 1), (0, -1), 'RIGHT'),
('FONTSIZE', (0, 1), (-1, -1), 7),
('GRID', (0, 0), (-1, -1), 0.5, colors.Color(0.85, 0.85, 0.85)),
('LINEBELOW', (0, 0), (-1, 0), 1.5, colors.Color(0.118, 0.161, 0.231)),
('ROWBACKGROUNDS', (1, 1), (-1, -1), [colors.white, colors.Color(0.98, 0.98, 0.98)]),
]
for r, c, clr in cell_colors:
style_cmds.append(('BACKGROUND', (c, r), (c, r), clr))
for r, c, hex_clr in cell_colors_list:
rl_color = hex_to_reportlab_color(hex_clr)
style_cmds.append(('BACKGROUND', (c, r), (c, r), rl_color))
# Merge cells for blocks spanning multiple time slots
for date_idx, date_key in enumerate(sorted_dates):
col = date_idx + 1
for block in blocks_by_date[date_key]:
block_start_min = time_to_minutes(block.start)
block_end_min = time_to_minutes(block.end)
start_row = None
end_row = None
for slot_idx in range(slot_count):
slot_min = time_to_minutes(time_slots[slot_idx])
if slot_min == block_start_min:
start_row = slot_idx + 1
if slot_idx + 1 < len(time_slots):
next_slot_min = time_to_minutes(time_slots[slot_idx + 1])
if next_slot_min == block_end_min:
end_row = slot_idx + 1
if start_row is not None and end_row is not None and end_row > start_row:
style_cmds.append(('SPAN', (col, start_row), (col, end_row)))
table.setStyle(TableStyle(style_cmds))
elements.append(table)
# Legend
elements.append(Spacer(1, 5 * mm))
elements.append(Paragraph("<b>Legenda:</b>", legend_style))
legend_items = []
for pt in doc.program_types:
legend_items.append([Paragraph(f" {pt.name}", legend_style)])
legend_data = []
legend_colors_list = []
for i, (typ, desc) in enumerate(program_descriptions.items()):
legend_data.append([Paragraph(f"{desc} ({typ})", legend_style)])
legend_colors_list.append(hex_to_reportlab_color(program_colors[typ]))
if legend_data:
legend_table = Table(legend_data, colWidths=[80 * mm])
if legend_items:
elements.append(Paragraph("<b>Legenda:</b>", legend_style))
elements.append(Spacer(1, 2 * mm))
legend_table = Table(legend_items, colWidths=[60 * mm])
legend_cmds = [
('GRID', (0, 0), (-1, -1), 0.5, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('BOX', (0, 0), (-1, -1), 0.5, colors.Color(0.85, 0.85, 0.85)),
('INNERGRID', (0, 0), (-1, -1), 0.5, colors.Color(0.85, 0.85, 0.85)),
]
for i, clr in enumerate(legend_colors_list):
legend_cmds.append(('BACKGROUND', (0, i), (0, i), clr))
for i, pt in enumerate(doc.program_types):
rl_color = hex_to_reportlab_color(pt.color)
legend_cmds.append(('BACKGROUND', (0, i), (0, i), rl_color))
if not is_light_color(pt.color):
legend_cmds.append(('TEXTCOLOR', (0, i), (0, i), colors.white))
legend_table.setStyle(TableStyle(legend_cmds))
elements.append(legend_table)
doc.build(elements)
# Footer
elements.append(Spacer(1, 5 * mm))
gen_date = datetime.now().strftime("%d.%m.%Y %H:%M")
elements.append(Paragraph(
f"Vygenerov\u00e1no Scen\u00e1r Creatorem | {gen_date}",
footer_style
))
doc_pdf.build(elements)
return buffer.getvalue()

View File

@@ -1,242 +0,0 @@
"""
Timetable generation logic for Scenar Creator.
Extracted from scenar/core.py — create_timetable (Excel output).
"""
import pandas as pd
from openpyxl import Workbook
from openpyxl.styles import Alignment, Border, Font, PatternFill, Side
from openpyxl.utils import get_column_letter
from datetime import datetime
import logging
from .validator import ScenarsError
logger = logging.getLogger(__name__)
def calculate_row_height(cell_value, column_width):
    """Estimate an Excel row height (in points) for wrapped text.

    Assumes ~1.2 characters fit per unit of column width and 15 points per
    rendered line — a heuristic, not an exact Excel metric.

    Args:
        cell_value: Cell content (any type; stringified for measurement).
        column_width: Width of the column the value is rendered in.

    Returns:
        Estimated height in points (minimum 15, i.e. one line).
    """
    if not cell_value:
        return 15
    max_line_length = column_width * 1.2
    # Guard against zero/negative widths (e.g. a width derived from an empty
    # legend string), which previously raised ZeroDivisionError below.
    if max_line_length <= 0:
        max_line_length = 1
    lines = str(cell_value).split('\n')
    line_count = 0
    for line in lines:
        # Each explicit line contributes at least one rendered line, plus
        # one extra for every full wrap of max_line_length characters.
        line_count += len(line) // max_line_length + 1
    return line_count * 15
def calculate_column_width(text):
    """Estimate an Excel column width for *text*: widest line length × 1.2."""
    widest = max(len(segment) for segment in str(text).split('\n'))
    return widest * 1.2
def create_timetable(data: pd.DataFrame, title: str, detail: str,
                     program_descriptions: dict, program_colors: dict) -> Workbook:
    """
    Create an OpenPyXL timetable workbook.

    Layout: merged title/detail rows, a header row of 15-minute time slots,
    then one band of rows per day with merged, colored cells per program
    block, followed by a legend of program types.

    Args:
        data: DataFrame with validated schedule data
        title: Event title
        detail: Event detail/description
        program_descriptions: {type: description}
        program_colors: {type: color_hex}

    Returns:
        openpyxl.Workbook

    Raises:
        ScenarsError: if data is invalid or types are missing
    """
    if data.empty:
        raise ScenarsError("Data is empty after validation")
    # Every Typ used by the schedule must have a color defined up front.
    missing_types = [typ for typ in data["Typ"].unique() if typ not in program_colors]
    if missing_types:
        raise ScenarsError(
            f"Missing type definitions: {', '.join(missing_types)}. "
            "Please define all program types."
        )
    wb = Workbook()
    ws = wb.active
    thick_border = Border(left=Side(style='thick', color='000000'),
                          right=Side(style='thick', color='000000'),
                          top=Side(style='thick', color='000000'),
                          bottom=Side(style='thick', color='000000'))
    # Title and detail
    ws['A1'] = title
    ws['A1'].alignment = Alignment(horizontal="center", vertical="center")
    ws['A1'].font = Font(size=24, bold=True)
    ws['A1'].border = thick_border
    ws['A2'] = detail
    ws['A2'].alignment = Alignment(horizontal="center", vertical="center")
    ws['A2'].font = Font(size=16, italic=True)
    ws['A2'].border = thick_border
    # Default the first column's width so row-height estimation has a basis.
    if ws.column_dimensions[get_column_letter(1)].width is None:
        ws.column_dimensions[get_column_letter(1)].width = 40
    title_row_height = calculate_row_height(title, ws.column_dimensions[get_column_letter(1)].width)
    detail_row_height = calculate_row_height(detail, ws.column_dimensions[get_column_letter(1)].width)
    ws.row_dimensions[1].height = title_row_height
    ws.row_dimensions[2].height = detail_row_height
    data = data.sort_values(by=["Datum", "Zacatek"])
    start_times = data["Zacatek"]
    end_times = data["Konec"]
    if start_times.isnull().any() or end_times.isnull().any():
        raise ScenarsError("Data contains invalid time values")
    try:
        min_time = min(start_times)
        max_time = max(end_times)
    except ValueError as e:
        raise ScenarsError(f"Error determining time range: {e}")
    # One column per 15-minute slot spanning the event's full time range.
    time_slots = pd.date_range(
        datetime.combine(datetime.today(), min_time),
        datetime.combine(datetime.today(), max_time),
        freq='15min'
    ).time
    total_columns = len(time_slots) + 1
    # Stretch the title/detail rows across the whole table width.
    ws.merge_cells(start_row=1, start_column=1, end_row=1, end_column=total_columns)
    ws.merge_cells(start_row=2, start_column=1, end_row=2, end_column=total_columns)
    row_offset = 3
    col_offset = 1
    # Header row: "Datum" label plus one cell per time slot.
    cell = ws.cell(row=row_offset, column=col_offset, value="Datum")
    cell.fill = PatternFill(start_color="D3D3D3", end_color="D3D3D3", fill_type="solid")
    cell.alignment = Alignment(horizontal="center", vertical="center")
    cell.font = Font(bold=True)
    cell.border = thick_border
    for i, time_slot in enumerate(time_slots, start=col_offset + 1):
        cell = ws.cell(row=row_offset, column=i, value=time_slot.strftime("%H:%M"))
        cell.fill = PatternFill(start_color="D3D3D3", end_color="D3D3D3", fill_type="solid")
        cell.alignment = Alignment(horizontal="center", vertical="center")
        cell.font = Font(bold=True)
        cell.border = thick_border
    current_row = row_offset + 1
    grouped_data = data.groupby(data['Datum'])
    for date, group in grouped_data:
        day_name = date.strftime("%A")
        date_str = date.strftime(f"%d.%m {day_name}")
        cell = ws.cell(row=current_row, column=col_offset, value=date_str)
        cell.alignment = Alignment(horizontal="center", vertical="center")
        cell.fill = PatternFill(start_color="D3D3D3", end_color="D3D3D3", fill_type="solid")
        cell.font = Font(bold=True, size=14)
        cell.border = thick_border
        # Track which cells are already filled (for overlap detection)
        date_row = current_row
        occupied_cells = set()  # (row, col) pairs already filled
        for _, row in group.iterrows():
            start_time = row["Zacatek"]
            end_time = row["Konec"]
            try:
                start_index = list(time_slots).index(start_time) + col_offset + 1
                end_index = list(time_slots).index(end_time) + col_offset + 1
            except ValueError as e:
                # Block times must land exactly on a 15-minute boundary to
                # match a slot; anything else is logged and skipped.
                logger.error(f"Time slot not found: {start_time} to {end_time}")
                continue
            cell_value = f"{row['Program']}"
            if pd.notna(row['Garant']):
                cell_value += f"\n{row['Garant']}"
            if pd.notna(row['Poznamka']):
                cell_value += f"\n\n{row['Poznamka']}"
            # Check for overlaps
            working_row = date_row + 1
            conflict = False
            for col in range(start_index, end_index):
                if (working_row, col) in occupied_cells:
                    conflict = True
                    break
            # If conflict, find next available row
            if conflict:
                while any((working_row, col) in occupied_cells for col in range(start_index, end_index)):
                    working_row += 1
            # Mark cells as occupied
            for col in range(start_index, end_index):
                occupied_cells.add((working_row, col))
            try:
                ws.merge_cells(start_row=working_row, start_column=start_index,
                               end_row=working_row, end_column=end_index - 1)
                # Get the first cell of the merge (not the merged cell)
                cell = ws.cell(row=working_row, column=start_index)
                cell.value = cell_value
            except Exception as e:
                raise ScenarsError(f"Error creating timetable cell: {str(e)}")
            cell.alignment = Alignment(wrap_text=True, horizontal="center", vertical="center")
            lines = str(cell_value).split("\n")
            # NOTE(review): each iteration overwrites cell.font wholesale, so
            # only the font chosen for the LAST line index takes effect —
            # per-line styling is not possible on a single cell; confirm intent.
            for idx, _ in enumerate(lines):
                if idx == 0:
                    cell.font = Font(bold=True)
                elif idx == 1:
                    cell.font = Font(bold=False)
                elif idx > 1 and pd.notna(row['Poznamka']):
                    cell.font = Font(italic=True)
            cell.fill = PatternFill(start_color=program_colors[row["Typ"]],
                                    end_color=program_colors[row["Typ"]],
                                    fill_type="solid")
            cell.border = thick_border
        # Update current_row to be after all rows for this date
        if occupied_cells:
            max_row_for_date = max(r for r, c in occupied_cells)
            current_row = max_row_for_date + 1
        else:
            current_row += 1
    # Legend
    legend_row = current_row + 2
    legend_max_length = 0
    ws.cell(row=legend_row, column=1, value="Legenda:").font = Font(bold=True)
    legend_row += 1
    for typ, desc in program_descriptions.items():
        legend_text = f"{desc} ({typ})"
        legend_cell = ws.cell(row=legend_row, column=1, value=legend_text)
        legend_cell.fill = PatternFill(start_color=program_colors[typ], fill_type="solid")
        legend_max_length = max(legend_max_length, calculate_column_width(legend_text))
        legend_row += 1
    # Size the first column to the widest legend entry; slots get a fixed width.
    ws.column_dimensions[get_column_letter(1)].width = legend_max_length
    for col in range(2, total_columns + 1):
        ws.column_dimensions[get_column_letter(col)].width = 15
    # Apply uniform alignment and borders across the whole grid.
    for row in ws.iter_rows(min_row=1, max_row=current_row - 1, min_col=1, max_col=total_columns):
        for cell in row:
            cell.alignment = Alignment(wrap_text=True, horizontal="center", vertical="center")
            cell.border = thick_border
    # Size each row to fit its tallest wrapped cell.
    for row in ws.iter_rows(min_row=1, max_row=current_row - 1):
        max_height = 0
        for cell in row:
            if cell.value:
                height = calculate_row_height(cell.value, ws.column_dimensions[get_column_letter(cell.column)].width)
                if height > max_height:
                    max_height = height
        ws.row_dimensions[row[0].row].height = max_height
    return wb

View File

@@ -1,18 +1,9 @@
"""
Validation logic for Scenar Creator.
Extracted from scenar/core.py — validate_inputs, validate_excel_template, overlap detection.
"""
"""Validation logic for Scenar Creator v3."""
import pandas as pd
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
DEFAULT_COLOR = "#ffffff"
MAX_FILE_SIZE_MB = 10
REQUIRED_COLUMNS = ["Datum", "Zacatek", "Konec", "Program", "Typ", "Garant", "Poznamka"]
class ScenarsError(Exception):
    """Base exception for Scenar Creator; all app-specific errors derive from it."""
@@ -22,48 +13,3 @@ class ScenarsError(Exception):
class ValidationError(ScenarsError):
    """Raised when input validation fails (title/detail checks, form rows, times)."""
    pass
class TemplateError(ScenarsError):
    """Raised when an uploaded Excel template is unreadable or missing required columns."""
    pass
def validate_inputs(title: str, detail: str, file_size: int) -> None:
    """Validate user-supplied title, detail and upload size.

    Args:
        title: Event title (non-empty string, at most 200 characters).
        detail: Event detail (non-empty string, at most 500 characters).
        file_size: Upload size in bytes, capped by MAX_FILE_SIZE_MB.

    Raises:
        ValidationError: on any missing, empty, oversized or non-string value.
    """
    # Title and detail share the same checks; only label and limit differ.
    for value, label, max_len in ((title, "Title", 200), (detail, "Detail", 500)):
        if not value or not isinstance(value, str):
            raise ValidationError(f"{label} is required and must be a string")
        if not value.strip():
            raise ValidationError(f"{label} cannot be empty")
        if len(value) > max_len:
            raise ValidationError(f"{label} is too long (max {max_len} characters)")
    if file_size > MAX_FILE_SIZE_MB * 1024 * 1024:
        raise ValidationError(f"File size exceeds {MAX_FILE_SIZE_MB} MB limit")
def normalize_time(time_str: str):
    """Parse "%H:%M" or "%H:%M:%S" into a datetime.time, or None if neither format matches."""
    for pattern in ('%H:%M', '%H:%M:%S'):
        try:
            return datetime.strptime(time_str, pattern).time()
        except ValueError:
            pass  # try the next accepted format
    return None
def validate_excel_template(df: pd.DataFrame) -> None:
    """Check that the Excel DataFrame contains every required template column.

    Raises:
        TemplateError: listing the missing columns, if any are absent.
    """
    present = set(df.columns)
    missing_cols = set(REQUIRED_COLUMNS) - present
    if not missing_cols:
        return
    raise TemplateError(
        f"Excel template missing required columns: {', '.join(missing_cols)}. "
        f"Expected: {', '.join(REQUIRED_COLUMNS)}"
    )