diff --git a/badges_app/Dockerfile b/badges_app/Dockerfile
new file mode 100644
index 0000000..a1307be
--- /dev/null
+++ b/badges_app/Dockerfile
@@ -0,0 +1,23 @@
+FROM ubuntu:18.04
+
+RUN apt-get update && \
+    apt-get install -y wget bash python3.6-venv python3.6-dev python3-pip build-essential inkscape unzip librsvg2-bin poppler-utils && \
+    rm -rf /var/lib/apt/lists/*
+
+ENV LANG=C.UTF-8
+ENV LANGUAGE=C.UTF-8
+ENV LC_ALL=C.UTF-8
+
+WORKDIR /app
+
+COPY requirements.txt .
+RUN pip3 install --upgrade pip && pip3 install -r requirements.txt
+
+COPY . .
+
+RUN wget --header 'Host: dl.dafont.com' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' --header 'Accept-Language: en-US,en;q=0.5' --referer 'https://www.dafont.com/sansation.font' --header 'Upgrade-Insecure-Requests: 1' 'https://dl.dafont.com/dl/?f=sansation' --output-document 'sansation.zip' && \
+ unzip sansation.zip -d /usr/local/share/fonts && \
+ fc-cache -f -v
+
+EXPOSE 8000
+
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/badges_app/README.md b/badges_app/README.md
new file mode 100644
index 0000000..afad6fe
--- /dev/null
+++ b/badges_app/README.md
@@ -0,0 +1,33 @@
+# Conference Badge Generator WebApp
+
+A web application for generating conference badges from SVG templates and participant CSV data.
+
+## Features
+
+- Upload multiple SVG badge templates
+- Upload multiple CSV files with attendee data
+- Two output options:
+ - Separate PDF files for each badge
+ - Single merged PDF with 4 badges per page (with customizable dimensions)
+
+## 🚀 Quick Start with Docker
+
+1. Navigate to the project directory:
+
+```bash
+cd badges_app
+```
+
+2. Build the Docker image:
+
+```bash
+docker build -t badge-generator .
+```
+
+3. Run the container:
+
+```bash
+docker run -d -p 8000:8000 --name badge-app badge-generator
+```
+
+4. Open the app in your browser: http://localhost:8000
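+
+5. Follow the logs or stop the app when you are done:
+
+```bash
+docker logs -f badge-app
+docker stop badge-app && docker rm badge-app
+```
+
+## Checking a CSV from the command line
+
+The repository also ships a small helper, `analyze_csv_and_svg.py`, that tries to map CSV columns onto the badge fields (`First Name`, `Last Name`, `Company Name`). A rough sketch of how it can be run from inside `badges_app` (the file name `attendees.csv` is only an example):
+
+```bash
+# Analyze the header and a few sample values of a CSV file
+python3 analyze_csv_and_svg.py --csv_file attendees.csv
+
+# Or pass a comma-separated list of column names directly
+python3 analyze_csv_and_svg.py --columns "First Name,Surname,Company"
+```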
diff --git a/badges_app/__init__.py b/badges_app/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/badges_app/analyze_csv_and_svg.py b/badges_app/analyze_csv_and_svg.py
new file mode 100644
index 0000000..e0119cd
--- /dev/null
+++ b/badges_app/analyze_csv_and_svg.py
@@ -0,0 +1,225 @@
+import argparse
+from difflib import SequenceMatcher
+import base64
+import io
+from fastapi import UploadFile
+import xml.etree.ElementTree as ET
+import re
+from typing import List
+from pdf2image import convert_from_bytes
+import os
+import shutil
+import csv
+
+from generate_badges import create_badges
+
+# Known naming variants for each target field
+KNOWN_VARIANTS = {
+ 'First Name': ['firstname', 'fname', 'givenname'],
+ 'Last Name': ['lastname', 'surname', 'lname', 'familyname'],
+ 'Company Name': ['companyname', 'company name', 'company', 'organization', 'employer'],
+}
+
+
+def normalize(name: str) -> str:
+ """Normalize column name: remove special characters and lowercase it."""
+ return re.sub(r'[\s_\-\.]', '', name).lower()
+
+
+def best_match(variants, normalized_columns, used_columns):
+ """Return the best match from normalized_columns for the given variants."""
+ best_score = 0
+ best_match_column = None
+
+ for variant in variants:
+ for column in normalized_columns:
+ if column in used_columns:
+ continue
+ score = SequenceMatcher(None, variant, column).ratio()
+ if score > best_score:
+ best_score = score
+ best_match_column = column
+
+ return best_match_column if best_score >= 0.6 else None
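+
+# Illustration (comment only): for the columns ['First_Name', 'Surname', 'e-mail'],
+# normalize() yields 'firstname', 'surname' and 'email'; the 'First Name' variant
+# 'firstname' then matches with a SequenceMatcher ratio of 1.0, comfortably above
+# the 0.6 cutoff, so the 'First_Name' column is selected for that field.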
+
+
+def find_matching_columns(target, normalized_columns, used_columns, results):
+    """Find the best matching column for a single target field, record it in results, and return its original name ("" if none)."""
+ variants = KNOWN_VARIANTS.get(target, [target])
+ match_column = best_match(variants, normalized_columns, used_columns)
+
+ if match_column:
+ matched_column = normalized_columns[match_column]
+ used_columns.add(match_column)
+
+ results[target] = {
+ 'csv_column': matched_column,
+ 'samples': []
+ }
+ return matched_column
+ return ""
+
+
+def find_matching_columns_from_list(columns: list, target_fields: list) -> dict:
+ """Find best matching columns from a columns list for the given target fields."""
+ normalized_columns = {normalize(c): c for c in columns}
+ results = {}
+ used_columns = set()
+
+ for target in target_fields:
+ matched_column = find_matching_columns(target, normalized_columns, used_columns, results)
+ if not matched_column:
+ results[target] = None
+
+ return results
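+
+# Example of the returned mapping (illustrative):
+#   find_matching_columns_from_list(['First_Name', 'Surname', 'Company'],
+#                                   ['First Name', 'Last Name', 'Company Name'])
+#   -> {'First Name':   {'csv_column': 'First_Name', 'samples': []},
+#       'Last Name':    {'csv_column': 'Surname',    'samples': []},
+#       'Company Name': {'csv_column': 'Company',    'samples': []}}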
+
+
+def find_matching_columns_from_csv(csv_file: str, target_fields: list) -> dict:
+ """Find best matching columns from a CSV file for the given target fields."""
+ with open(csv_file, mode='r', encoding='utf-8') as file:
+ reader = csv.reader(file)
+ columns = next(reader)
+
+ normalized_columns = {normalize(c): c for c in columns}
+ results = {}
+ used_columns = set()
+
+ for target in target_fields:
+ matched_column = find_matching_columns(target, normalized_columns, used_columns, results)
+ if matched_column:
+ # Collect up to 3 non-empty sample values
+ file.seek(0)
+ next(reader) # Skip header again
+ sample_count = 0
+ for row in reader:
+ idx = columns.index(matched_column)
+ if idx >= len(row):
+ continue
+ value = row[idx].strip()
+ if value:
+ results[target]['samples'].append(value)
+ sample_count += 1
+ if sample_count >= 3:
+ break
+ else:
+ results[target] = None # No match found
+
+ return results
+
+
+def print_analysis(results: dict):
+ """Print the matching results in a readable format."""
+ print("\nCSV Column Matching Results")
+ print("=" * 50)
+
+ for target, data in results.items():
+ print(f"\nTarget field: '{target}'")
+ if data:
+ print(f" Matched column: '{data['csv_column']}'")
+ if data['samples']:
+ print(f" Sample values: {', '.join(data['samples'])}")
+ else:
+ print(" (No sample values found)")
+ else:
+ print(" No matching column found.")
+
+ print("\n" + "=" * 50)
+
+
+def list_of_strings(arg):
+ return arg.split(',')
+
+
+def svg_to_image(tmp_preview_dir, role, template_filename: str, template_vars) -> str:
+    """Render a one-off preview badge from the template and return it as a base64-encoded JPEG."""
+    create_badges(template_filename, os.path.join(tmp_preview_dir, "preview_data.csv"), tmp_preview_dir, template_vars)
+
+    # The preview CSV holds a single row, so the first generated badge is expected at "<role>_0.pdf"
+    with open(os.path.join(tmp_preview_dir, f"{role}_0.pdf"), "rb") as f:
+        images = convert_from_bytes(f.read(), first_page=1, last_page=1)
+
+    buffered = io.BytesIO()
+    images[0].save(buffered, format="JPEG", quality=85)
+    return base64.b64encode(buffered.getvalue()).decode()
+
+
+def prepare_preview_data(tmp_preview_dir, template_vars):
+    """Write a one-row preview CSV whose columns are the template variables, filled with placeholder values."""
+    preview_data = {
+ "First Name": "Klaus",
+ "Last Name": "Templatemann",
+ "Company Name": "Badgeify"
+ }
+ matches = find_matching_columns_from_list(template_vars, preview_data.keys())
+
+ column_value_map = {}
+ for target_key, data in matches.items():
+ if data:
+ column_name = data["csv_column"]
+ column_value_map[column_name] = preview_data.get(target_key, "")
+
+ row = [column_value_map.get(col, "") for col in template_vars]
+
+ with open(os.path.join(tmp_preview_dir, "preview_data.csv"), "w", newline="", encoding="utf-8") as f:
+ writer = csv.writer(f)
+ writer.writerow(template_vars or [","])
+ writer.writerow(row or [","])
+
+
+def analyze_svg_templates(svg_files: List[UploadFile], templates_dir: str):
+    """Extract the {{placeholder}} variables from each uploaded SVG template and render a preview image."""
+    tmp_preview_dir = "tmp_preview"
+    os.makedirs(tmp_preview_dir, exist_ok=True)
+    pattern = re.compile(r"\{\{([^}]+)\}\}")
+    results = {}
+    previews = {}
+
+    try:
+        for svg_file in svg_files:
+            template_filename = os.path.join(templates_dir, svg_file.filename)
+            with open(template_filename, "rb") as f:
+ try:
+ root = ET.fromstring(f.read())
+ template_vars = set()
+ for elem in root.iter():
+ if elem.text and pattern.search(elem.text):
+ template_vars.update(pattern.findall(elem.text))
+ for attr in elem.attrib.values():
+ if pattern.search(attr):
+ template_vars.update(pattern.findall(attr))
+
+ results[svg_file.filename] = sorted(template_vars)
+ prepare_preview_data(tmp_preview_dir, template_vars)
+ previews[svg_file.filename] = svg_to_image(tmp_preview_dir, svg_file.filename.split('.')[0], template_filename, template_vars)
+ except ET.ParseError as e:
+ print(f"Error parsing SVG {svg_file.filename}: {e}")
+ results[svg_file.filename] = ["Invalid SVG file"]
+ previews[svg_file.filename] = None
+
+ return results, previews
+ finally:
+ shutil.rmtree(tmp_preview_dir, ignore_errors=True)
+
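+# Illustration (not executed): an uploaded "speaker.svg" whose text nodes contain
+# "{{First Name}} {{Last Name}}" would produce
+#   results  == {"speaker.svg": ["First Name", "Last Name"]}
+#   previews == {"speaker.svg": "<base64-encoded JPEG preview>"}
+# assuming docstamp and Inkscape are available to render the preview.
+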
+
+if __name__ == "__main__":
+ TARGET_FIELDS = ['First Name', 'Last Name', 'Company Name']
+
+ parser = argparse.ArgumentParser(
+ description="Find matching columns in a CSV file."
+ )
+ parser.add_argument(
+ "--csv_file",
+ help="Path to the CSV file to analyze",
+ type=str,
+ default=None
+ )
+ parser.add_argument(
+ "--columns",
+        help="Comma-separated list of column names to analyze",
+ type=list_of_strings,
+ default=None
+ )
+ args = parser.parse_args()
+
+ matches = {}
+ if args.csv_file:
+ matches = find_matching_columns_from_csv(args.csv_file, TARGET_FIELDS)
+ elif args.columns:
+ matches = find_matching_columns_from_list(args.columns, TARGET_FIELDS)
+ print_analysis(matches)
diff --git a/badges_app/generate_badges.py b/badges_app/generate_badges.py
new file mode 100644
index 0000000..182b509
--- /dev/null
+++ b/badges_app/generate_badges.py
@@ -0,0 +1,52 @@
+import csv
+import json
+import os
+import shutil
+import subprocess
+
+
+class MissingCSVFieldsException(Exception):
+    def __init__(self, svg_filename, csv_filename, missing_fields):
+        message = (
+            "We’re unable to proceed with badge generation.\n"
+            "Details:\n"
+            f"The following fields are used in the badge template {os.path.basename(svg_filename)} "
+            f"but are missing from the uploaded CSV file {os.path.basename(csv_filename)}:\n\n"
+            f"{missing_fields}\n\n"
+            "Please check your CSV file and ensure all required fields are present."
+        )
+        super().__init__(message)
+
+
+def check_svg_and_csv_consistency(svg_file, csv_file, svg_analysis):
+ with open(csv_file, mode='r', encoding='utf-8') as file:
+ reader = csv.reader(file)
+ columns = next(reader)
+
+ missing_fields = [field for field in svg_analysis if field not in columns]
+ if missing_fields:
+ raise MissingCSVFieldsException(svg_file, csv_file, missing_fields)
+
+
+def create_badges(template_file, input_file, output_dir, svg_analysis):
+ check_svg_and_csv_consistency(template_file, input_file, svg_analysis)
+
+    # Omit --index so docstamp renders every CSV row; with the patched --index option
+    # (integer-only, repeatable) an empty string would be rejected.
+    cmd = f'docstamp create -i {input_file} -t {template_file} -d pdf -o {output_dir}'
+    print(f'Calling {cmd}')
+    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    _stdout, stderr = process.communicate()
+    # Inkscape emits a harmless "Failed to get connection" D-Bus warning on headless systems
+    if stderr and b'Failed to get connection' not in stderr:
+        raise Exception(stderr.decode())
+
+
+def create_all_badges(roles, templates_dir, data_dir, output_dir, svg_analysis_results):
+    if os.path.exists(output_dir):
+        shutil.rmtree(output_dir)
+    os.makedirs(output_dir)
+    # Parse the JSON-serialized per-template analysis once instead of per role
+    analysis_by_template = json.loads(svg_analysis_results)
+    for role in roles:
+        create_badges(
+            os.path.join(templates_dir, f"{role}.svg"),
+            os.path.join(data_dir, f"{role}.csv"),
+            output_dir,
+            analysis_by_template[f"{role}.svg"],
+        )
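+
+
+# Illustrative call (file names are hypothetical): if "speaker.svg" and "speaker.csv" were
+# uploaded, a caller would invoke something like
+#   create_all_badges(["speaker"], templates_dir, data_dir, output_dir,
+#                     json.dumps({"speaker.svg": ["First Name", "Last Name"]}))
+# where the last argument is the JSON-serialized per-template analysis.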
diff --git a/badges_app/main.py b/badges_app/main.py
new file mode 100644
index 0000000..9bf9a3a
--- /dev/null
+++ b/badges_app/main.py
@@ -0,0 +1,200 @@
+import io
+import os
+import shutil
+import zipfile
+from typing import List
+
+from fastapi import FastAPI, File, Form, Request, UploadFile
+from fastapi.responses import HTMLResponse, FileResponse
+from fastapi.staticfiles import StaticFiles
+from fastapi.templating import Jinja2Templates
+
+from generate_badges import create_all_badges, MissingCSVFieldsException
+from merge_badges_into_pdf import BadgeMerger
+from analyze_csv_and_svg import analyze_svg_templates
+
+
+app = FastAPI(title="Badge Generator")
+app.mount("/static", StaticFiles(directory="static"), name="static")
+TEMPLATES = Jinja2Templates(directory="templates")
+ERROR_MESSAGE = (
+ "We’re unable to proceed with badge generation.
+
+{% endblock %}
\ No newline at end of file
diff --git a/docstamp/cli/cli.py b/docstamp/cli/cli.py
index 7c6eee9..db9154c 100644
--- a/docstamp/cli/cli.py
+++ b/docstamp/cli/cli.py
@@ -50,7 +50,7 @@ def cli():
default='inkscape', show_default=True,
help='The rendering command to be used in case file name '
'extension is not specific.')
-@click.option('--index', default=[],
+@click.option('--index', type=int, multiple=True,
help='Index/es of the CSV file that you want to create the '
'document from. Note that the samples numbers start from 0 '
'and the empty ones do not count.')
@@ -98,7 +98,7 @@ def create(input, template, field, outdir, prefix, otype, command, index,
# filter the items if index
if index:
- myitems = {int(idx): items[int(idx)] for idx in index}
+ myitems = {idx: items[idx] for idx in index}
items = myitems
log.debug('Using the elements with index {} of the input '
'file.'.format(index))