Created
January 11, 2026 07:10
-
-
Save ripwu/5bc6723af9215235afed7666bec2fac8 to your computer and use it in GitHub Desktop.
Convert Obsidian WikiLinks to standard Markdown links for better compatibility.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/usr/bin/env python3 | |
| """ | |
| Convert Obsidian WikiLinks to standard Markdown links for better compatibility. | |
| Problem: | |
| - Obsidian WikiLink syntax (![[...]] and [[...]]) is not supported by GitHub Markdown. | |
| - Raw link destinations with spaces or unbalanced parentheses can break parsing. | |
| Root cause: | |
| - WikiLinks are Obsidian-specific extensions outside CommonMark. | |
| - Markdown link destinations require escaping for certain characters. | |
| Solution: | |
| - Protect fenced code blocks, inline code, and existing Markdown links. | |
| - Resolve WikiLink targets against the vault and convert to standard Markdown. | |
| - Encode only characters that would break parsing (whitespace, control chars, | |
| unbalanced parentheses, #). | |
| Conversion rules: | |
| 1. Image: ![[path/to/image.png]] ->  | |
| 2. Image with alias: ![[path/to/image.png|alt text]] ->  | |
| 3. Link: [[url]] -> [url](url) | |
| 4. Link with alias: [[url|display text]] -> [display text](url) | |
| 5. Note link: [[Note]] -> [Note](relative/path/Note.md) | |
| 6. Image size: ![[img.png|100x200]] -> <img src="img.png" width="100" height="200" alt=""> | |
| Usage: | |
| python convert_wikilinks.py # preview changes (default) | |
| python convert_wikilinks.py --execute # apply changes | |
| """ | |
| import os | |
| import re | |
| import argparse | |
| from collections import defaultdict | |
| from pathlib import Path | |
| from typing import Optional, Tuple | |
| from urllib.parse import quote, urlsplit, urlunsplit | |
def scan_files(base_path: Path) -> tuple[list[Path], list[Path]]:
    """Walk base_path and collect (markdown_files, every_file).

    Hidden directories (e.g. .obsidian, .git) are pruned from the walk.
    """
    markdown_files: list[Path] = []
    every_file: list[Path] = []
    for root, dirs, names in os.walk(base_path):
        # Prune hidden directories in place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        for name in names:
            full = Path(root) / name
            every_file.append(full)
            if name.endswith('.md'):
                markdown_files.append(full)
    return markdown_files, every_file
def normalize_path(raw_path: str) -> str:
    """Strip surrounding whitespace and convert backslashes to forward slashes."""
    cleaned = raw_path.strip()
    return cleaned.replace('\\', '/')
def is_probable_url(target: str) -> bool:
    """Return True when target begins with a URI scheme.

    Windows drive letters (e.g. ``C:``) look like one-letter schemes and are
    explicitly excluded.
    """
    if not target:
        return False
    scheme_match = re.match(r'^([a-zA-Z][a-zA-Z0-9+.-]*):', target)
    if scheme_match is None:
        return False
    scheme = scheme_match.group(1)
    # A single letter directly followed by ':' is a drive, not a scheme.
    if len(scheme) == 1 and target[1:2] == ':':
        return False
    return True
def percent_encode_char(ch: str) -> str:
    """UTF-8 encode a single character and percent-encode every resulting byte."""
    encoded = ch.encode('utf-8')
    return ''.join('%{:02X}'.format(b) for b in encoded)
def find_unbalanced_parentheses(text: str) -> set[int]:
    """Return the indices of every '(' or ')' that has no matching partner.

    These positions must be percent-encoded or they break Markdown link parsing.
    """
    open_positions: list[int] = []
    orphans: set[int] = set()
    for pos, ch in enumerate(text):
        if ch == '(':
            open_positions.append(pos)
        elif ch == ')':
            if open_positions:
                open_positions.pop()
            else:
                # ')' with no earlier unmatched '(' is unbalanced.
                orphans.add(pos)
    # Any '(' still open at the end never got closed.
    orphans.update(open_positions)
    return orphans
def encode_component_minimal(text: str) -> str:
    """Percent-encode only characters that would break Markdown link parsing.

    Non-ASCII characters pass through untouched; whitespace, control
    characters, '#', and unbalanced parentheses are escaped.
    """
    if not text:
        return text
    risky_parens = find_unbalanced_parentheses(text)
    pieces: list[str] = []
    for pos, ch in enumerate(text):
        code = ord(ch)
        needs_escape = (
            ch.isspace()
            or code < 0x20
            or code == 0x7F
            or ch == '#'
            or pos in risky_parens
        )
        pieces.append(percent_encode_char(ch) if needs_escape else ch)
    return ''.join(pieces)
def encode_url(raw_url: str) -> str:
    """Minimally encode a URL or local path so Markdown parses the link safely."""
    if not raw_url:
        return raw_url
    raw_url = normalize_path(raw_url)
    if is_probable_url(raw_url):
        # Real URLs: encode path/query/fragment separately, leaving the
        # scheme and host intact.
        parts = urlsplit(raw_url)
        return urlunsplit((
            parts.scheme,
            parts.netloc,
            encode_component_minimal(parts.path),
            encode_component_minimal(parts.query),
            encode_component_minimal(parts.fragment),
        ))
    # Local paths: treat everything after the first '?' as a query string.
    path_part, _, query_part = raw_url.partition('?')
    encoded_path = encode_component_minimal(path_part)
    if query_part:
        return f'{encoded_path}?{encode_component_minimal(query_part)}'
    return encoded_path
def encode_fragment(fragment: str) -> str:
    """Percent-encode an anchor fragment; unreserved chars and '%' pass through."""
    unreserved = '-._~%'
    return quote(fragment, safe=unreserved)
def is_within_base(candidate: Path, base_path: Path) -> bool:
    """Return True when candidate is base_path itself or lies beneath it."""
    base_abs = os.path.abspath(base_path)
    candidate_abs = os.path.abspath(candidate)
    return os.path.commonpath([candidate_abs, base_abs]) == base_abs
def build_md_index(md_files: list[Path], base_path: Path) -> dict:
    """Build lookup tables for Markdown files so [[Note]] links can be resolved.

    Keys cover exact and lowercase variants of relative paths (with and
    without the .md extension) plus file stems.
    """
    base_abs = Path(os.path.abspath(base_path))
    index = {
        'base_abs': base_abs,
        'all_paths': set(),
        'by_stem': defaultdict(list),
        'by_stem_lower': defaultdict(list),
        'by_rel': {},
        'by_rel_no_ext': {},
        'by_rel_lower': defaultdict(list),
        'by_rel_no_ext_lower': defaultdict(list),
    }
    for entry in md_files:
        abs_entry = Path(os.path.abspath(entry))
        rel = abs_entry.relative_to(base_abs)
        rel_posix = rel.as_posix()
        rel_no_ext = rel.with_suffix('').as_posix()
        index['all_paths'].add(abs_entry)
        index['by_rel'][rel_posix] = abs_entry
        index['by_rel_no_ext'][rel_no_ext] = abs_entry
        index['by_rel_lower'][rel_posix.lower()].append(abs_entry)
        index['by_rel_no_ext_lower'][rel_no_ext.lower()].append(abs_entry)
        index['by_stem'][abs_entry.stem].append(abs_entry)
        index['by_stem_lower'][abs_entry.stem.lower()].append(abs_entry)
    return index
def build_file_index(all_files: list[Path], base_path: Path) -> dict:
    """Build lookup tables for every file in the vault (attachments included).

    Keys cover exact and lowercase variants of relative paths and filenames.
    """
    base_abs = Path(os.path.abspath(base_path))
    index = {
        'base_abs': base_abs,
        'all_paths': set(),
        'by_rel': {},
        'by_rel_lower': defaultdict(list),
        'by_name': defaultdict(list),
        'by_name_lower': defaultdict(list),
    }
    for entry in all_files:
        abs_entry = Path(os.path.abspath(entry))
        rel_posix = abs_entry.relative_to(base_abs).as_posix()
        index['all_paths'].add(abs_entry)
        index['by_rel'][rel_posix] = abs_entry
        index['by_rel_lower'][rel_posix.lower()].append(abs_entry)
        index['by_name'][abs_entry.name].append(abs_entry)
        index['by_name_lower'][abs_entry.name.lower()].append(abs_entry)
    return index
def select_best_candidate(candidates: list[Path], current_dir: Path) -> Optional[Path]:
    """Pick the candidate nearest to current_dir.

    Same-directory matches win outright; otherwise prefer fewer '..' hops,
    then fewer path components, then the shortest absolute path.
    """
    if not candidates:
        return None
    here = Path(os.path.abspath(current_dir))
    siblings = sorted((c for c in candidates if c.parent == here), key=str)
    if siblings:
        return siblings[0]

    def distance(path: Path) -> tuple[int, int, int]:
        rel_parts = Path(os.path.relpath(path, here)).parts
        climbs = sum(1 for part in rel_parts if part == '..')
        return (climbs, len(rel_parts), len(str(path)))

    return min(candidates, key=distance)
def resolve_exact_or_case_insensitive(candidate: Path, index: dict) -> Optional[Path]:
    """Look up candidate exactly, then fall back to a case-insensitive match."""
    if candidate in index['all_paths']:
        return candidate
    try:
        rel_key = candidate.relative_to(index['base_abs']).as_posix().lower()
    except ValueError:
        # Candidate lies outside the vault root.
        return None
    matches = index['by_rel_lower'].get(rel_key, [])
    if not matches:
        return None
    return min(matches, key=str)
def resolve_relative_path(target: str, current_dir: Path, index: dict,
                          allow_extension_fallback: bool = False) -> Optional[Path]:
    """Resolve a path taken relative to the current file's directory.

    With allow_extension_fallback, retry the lookup with a '.md' extension
    appended when the plain target does not resolve.
    """
    candidate = Path(os.path.abspath(current_dir / Path(target)))
    if not is_within_base(candidate, index['base_abs']):
        return None
    found = resolve_exact_or_case_insensitive(candidate, index)
    if found is not None:
        return found
    if not allow_extension_fallback or candidate.suffix.lower() == '.md':
        return None
    # Retry with '.md': replace an empty suffix, or append to an existing name.
    if candidate.suffix == '':
        with_md = candidate.with_suffix('.md')
    else:
        with_md = candidate.with_name(f'{candidate.name}.md')
    return resolve_exact_or_case_insensitive(with_md, index)
def resolve_vault_relative_path(target: str, base_path: Path, index: dict,
                                allow_extension_fallback: bool = False) -> Optional[Path]:
    """Resolve a path taken relative to the vault root.

    With allow_extension_fallback, retry the lookup with a '.md' extension
    appended when the plain target does not resolve.
    """
    base_abs = Path(os.path.abspath(base_path))
    candidate = Path(os.path.abspath(base_abs / Path(target.lstrip('/'))))
    if not is_within_base(candidate, base_abs):
        return None
    found = resolve_exact_or_case_insensitive(candidate, index)
    if found is not None:
        return found
    if not allow_extension_fallback or candidate.suffix.lower() == '.md':
        return None
    # Retry with '.md': replace an empty suffix, or append to an existing name.
    if candidate.suffix == '':
        with_md = candidate.with_suffix('.md')
    else:
        with_md = candidate.with_name(f'{candidate.name}.md')
    return resolve_exact_or_case_insensitive(with_md, index)
def resolve_by_stem(name: str, current_dir: Path, md_index: dict) -> Optional[Path]:
    """Find a Markdown file whose stem matches name, preferring exact case."""
    matches = md_index['by_stem'].get(name, [])
    if not matches:
        # Fall back to a case-insensitive match.
        matches = md_index['by_stem_lower'].get(name.lower(), [])
    return select_best_candidate(matches, current_dir)
def resolve_by_filename(name: str, current_dir: Path, file_index: dict) -> Optional[Path]:
    """Find an attachment by its full filename, preferring exact case."""
    matches = file_index['by_name'].get(name, [])
    if not matches:
        # Fall back to a case-insensitive match.
        matches = file_index['by_name_lower'].get(name.lower(), [])
    return select_best_candidate(matches, current_dir)
def make_relative_link(target_abs: Path, md_path: Path) -> str:
    """Return target_abs as a POSIX-style path relative to md_path's directory."""
    relative = os.path.relpath(target_abs, start=md_path.parent)
    return Path(relative).as_posix()
def split_anchor(target: str) -> tuple[str, str]:
    """Split target at the first '#' (heading) or '^' (block) anchor marker.

    Returns (path, anchor); anchor is '' when neither marker is present.
    """
    positions = [i for i in (target.find('#'), target.find('^')) if i != -1]
    if not positions:
        return target, ''
    cut = min(positions)
    return target[:cut], target[cut + 1:]
def parse_image_size(alt_text: str) -> Optional[Tuple[Optional[int], Optional[int]]]:
    """Parse Obsidian image-size alias syntax.

    '100' -> (100, None); '100x200' -> (100, 200); anything else -> None.
    """
    if not alt_text:
        return None
    spec = alt_text.strip()
    if re.fullmatch(r'\d+', spec):
        return int(spec), None
    pair = re.fullmatch(r'(\d+)\s*[xX]\s*(\d+)', spec)
    if pair is None:
        return None
    return int(pair.group(1)), int(pair.group(2))
def protect_fenced_code_blocks(content: str) -> tuple[str, list[str]]:
    """Replace each fenced code block with a placeholder.

    Returns (content_with_placeholders, protected_blocks). An unterminated
    fence at end-of-file is still protected.
    """
    fence_re = re.compile(r'^\s*(`{3,}|~{3,})')
    saved: list[str] = []
    pieces: list[str] = []
    pending: list[str] = []   # lines of the fence currently being collected
    open_char = ''
    open_len = 0
    inside = False
    for line in content.splitlines(keepends=True):
        if not inside:
            opener = fence_re.match(line)
            if opener:
                inside = True
                open_char = opener.group(1)[0]
                open_len = len(opener.group(1))
                pending = [line]
            else:
                pieces.append(line)
            continue
        pending.append(line)
        closer = fence_re.match(line)
        # A closing fence must use the same character and be at least as long.
        if closer and closer.group(1)[0] == open_char and len(closer.group(1)) >= open_len:
            inside = False
            saved.append(''.join(pending))
            pieces.append(f'__PROTECTED_FENCED_{len(saved) - 1}__')
            pending = []
    if pending:
        # Unterminated fence: protect whatever was collected.
        saved.append(''.join(pending))
        pieces.append(f'__PROTECTED_FENCED_{len(saved) - 1}__')
    return ''.join(pieces), saved
def protect_inline_code_spans(content: str) -> tuple[str, list[str]]:
    """Replace inline code spans (`...`) with placeholders.

    Returns (content_with_placeholders, protected_spans).
    """
    saved: list[str] = []

    def stash(match):
        saved.append(match.group(0))
        return f'__PROTECTED_CODESPAN_{len(saved) - 1}__'

    # Equal-length backtick runs delimit a span; spans never cross newlines.
    span_re = re.compile(r'(`+)([^`\n]*?)\1')
    return span_re.sub(stash, content), saved
def protect_markdown_links(content: str) -> tuple[str, list[str]]:
    """Replace existing Markdown links/images with placeholders so their text
    is not re-processed as WikiLinks.

    Returns (content_with_placeholders, protected_links).
    """
    protected = []
    def protect(match):
        protected.append(match.group(0))
        return f'__PROTECTED_LINK_{len(protected) - 1}__'
    # Label part: allows one level of nested [] (e.g. [see [1]]).
    # URL part: plain characters, backslash escapes, or one level of (...)
    # nesting. Bug fix: the nesting alternative previously read
    # '\\([^()]*\\)', which matches a literal backslash followed by a group,
    # so URLs containing parentheses (e.g. [a](b(c))) were never protected.
    pattern = re.compile(
        r'!?\[(?:[^\[\]]|\[[^\[\]]*\])*\]\((?:[^()\\]|\\.|\([^()]*\))*\)'
    )
    return pattern.sub(protect, content), protected
def restore_protected(content: str, protected: list[str], prefix: str) -> str:
    """Swap placeholders of the form ``{prefix}{N}__`` back for their saved text."""
    placeholder_re = re.compile(re.escape(prefix) + r'(\d+)__')

    def bring_back(match):
        return protected[int(match.group(1))]

    return placeholder_re.sub(bring_back, content)
def resolve_explicit_path(target: str, current_dir: Path, base_path: Path,
                          file_index: dict) -> Optional[Path]:
    """Resolve a target that already carries a file extension (any file type)."""
    if target.startswith(('./', '../')):
        found = resolve_relative_path(target, current_dir, file_index)
        if found:
            return found
    if '/' in target:
        # Try vault-root relative first, then relative to the current file.
        found = (resolve_vault_relative_path(target, base_path, file_index)
                 or resolve_relative_path(target, current_dir, file_index))
        if found:
            return found
    # Last resort: search the whole vault by bare filename.
    return resolve_by_filename(Path(target).name, current_dir, file_index)
def resolve_md_path(target: str, current_dir: Path, base_path: Path,
                    md_index: dict) -> Optional[Path]:
    """Resolve a note link (typically without an extension) to a Markdown file."""
    if target.startswith(('./', '../')):
        found = resolve_relative_path(target, current_dir, md_index, True)
        if found:
            return found
    if '/' in target:
        # Vault-root relative first, then relative to the current file.
        found = (resolve_vault_relative_path(target, base_path, md_index, True)
                 or resolve_relative_path(target, current_dir, md_index, True))
        if found:
            return found
    found = resolve_by_stem(target, current_dir, md_index)
    if found:
        return found
    # Final fallback: interpret the bare name relative to the current directory.
    return resolve_relative_path(target, current_dir, md_index, True)
def resolve_wikilink_target(target: str, md_path: Path, base_path: Path,
                            md_index: dict, file_index: dict) -> tuple[str, bool]:
    """Resolve a WikiLink target; return (link_text, was_resolved).

    URLs pass through unchanged; vault files are rewritten as paths relative
    to the current Markdown file. Unresolvable targets are returned verbatim
    with was_resolved=False.
    """
    target = normalize_path(target)
    if not target:
        return '', False
    if is_probable_url(target):
        # External URLs need no resolution.
        return target, True
    current_dir = md_path.parent
    if Path(target).suffix:
        # Has an extension: treat as a concrete file (attachment or note).
        found = resolve_explicit_path(target, current_dir, base_path, file_index)
        if found:
            return make_relative_link(found, md_path), True
        if '/' not in target:
            # Bare filename: try matching Markdown notes by stem.
            found = resolve_by_stem(target, current_dir, md_index)
            if found:
                return make_relative_link(found, md_path), True
            return target, False
        # Path-like target with an extension that did not resolve:
        # fall through and try resolving it as a note path.
    found = resolve_md_path(target, current_dir, base_path, md_index)
    if found:
        return make_relative_link(found, md_path), True
    return target, False
def convert_wikilinks(content: str, md_path: Path, base_path: Path,
                      md_index: dict, file_index: dict) -> tuple[str, list[dict]]:
    """
    Convert WikiLinks to standard Markdown.

    Args:
        content: Raw Markdown text of the file being converted.
        md_path: Path of that file (used to build relative links).
        base_path: Vault root directory.
        md_index: Index from build_md_index (note resolution).
        file_index: Index from build_file_index (attachment resolution).

    Returns: (converted_content, change_records)
    """
    changes = []
    # ========================================================================
    # Step 1: Protect code blocks/inline code/existing Markdown links.
    # ========================================================================
    protected_content, protected_fenced = protect_fenced_code_blocks(content)
    protected_content, protected_spans = protect_inline_code_spans(protected_content)
    protected_content, protected_links = protect_markdown_links(protected_content)
    # ========================================================================
    # Step 2: Convert WikiLinks to standard Markdown.
    # ========================================================================
    # WikiLink forms:
    #   - Image: ![[path]] or ![[path|alt]]
    #   - Link:  [[url]] or [[url|display text]]
    # Output:
    #   - Image: ![alt](path)
    #   - Link:  [text](url)
    # ------------------------------------------------------------------------
    def replace_image(match):
        """
        Handle image WikiLinks.
        ![[./attachments/img.png]] -> ![](./attachments/img.png)
        ![[./attachments/img.png|alt]] -> ![alt](./attachments/img.png)
        """
        full_match = match.group(0)
        inner = match.group(1)
        # Alias syntax: ![[path|alt]], split only on the first |.
        if '|' in inner:
            path, alt = inner.split('|', 1)
        else:
            path = inner
            alt = ''
        path = normalize_path(path)
        alt = alt.strip()
        encoded_path = encode_url(path)
        size = parse_image_size(alt)
        if size:
            # Size alias (e.g. |100x200): Markdown images cannot carry
            # dimensions, so emit an HTML <img> tag instead.
            width, height = size
            attrs = [f'src="{encoded_path}"', 'alt=""']
            if width:
                attrs.append(f'width="{width}"')
            if height:
                attrs.append(f'height="{height}"')
            new_text = f'<img {" ".join(attrs)}>'
        else:
            new_text = f'![{alt}]({encoded_path})'
        if full_match != new_text:
            changes.append({
                'type': 'image',
                'original': full_match,
                'converted': new_text
            })
        return new_text
    def replace_link(match):
        """
        Handle link WikiLinks.
        [[svn://192.168.1.98/path]] -> [svn://192.168.1.98/path](svn://192.168.1.98/path)
        [[url|display text]] -> [display text](url)
        """
        full_match = match.group(0)
        inner = match.group(1)
        # Alias syntax: [[url|text]].
        if '|' in inner:
            url, text = inner.split('|', 1)
        else:
            url = inner
            text = inner  # Without alias, the link text equals the URL.
        url = url.strip()
        text = text.strip()
        url, anchor = split_anchor(url)
        resolved_url, _ = resolve_wikilink_target(url, md_path, base_path, md_index, file_index)
        encoded_url = encode_url(resolved_url)
        if anchor:
            # Re-attach the anchor after resolution; an anchor-only link
            # (e.g. [[#heading]]) yields just '#fragment'.
            fragment = encode_fragment(anchor)
            encoded_url = f'{encoded_url}#{fragment}' if encoded_url else f'#{fragment}'
        new_text = f'[{text}]({encoded_url})'
        if full_match != new_text:
            changes.append({
                'type': 'link',
                'original': full_match,
                'converted': new_text
            })
        return new_text
    # Image WikiLinks: ![[...]]
    # Use .+? instead of [^\]]+ to allow single ] inside the target.
    image_pattern = r'!\[\[(.+?)\]\]'
    protected_content = re.sub(image_pattern, replace_image, protected_content)
    # Link WikiLinks: [[...]]
    # (?<!!) ensures we do not match the image form.
    link_pattern = r'(?<!!)\[\[(.+?)\]\]'
    protected_content = re.sub(link_pattern, replace_link, protected_content)
    # ========================================================================
    # Step 3: Restore protected content (reverse order of protection).
    # ========================================================================
    result = restore_protected(protected_content, protected_links, '__PROTECTED_LINK_')
    result = restore_protected(result, protected_spans, '__PROTECTED_CODESPAN_')
    result = restore_protected(result, protected_fenced, '__PROTECTED_FENCED_')
    return result, changes
def process_md_file(md_path: Path, base_path: Path, md_index: dict, file_index: dict,
                    dry_run: bool = True) -> tuple[bool, list[dict]]:
    """
    Process a single Markdown file.

    In dry-run mode nothing is written back to disk; the change records are
    still collected so the preview can report them.

    Returns: (updated, change_records)
    """
    try:
        with open(md_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception as e:
        print(f" β Read failed: {md_path} - {e}")
        return False, []
    new_content, changes = convert_wikilinks(content, md_path, base_path, md_index, file_index)
    if not changes:
        return False, []
    if not dry_run:
        try:
            with open(md_path, 'w', encoding='utf-8') as f:
                f.write(new_content)
        except Exception as e:
            print(f" β Write failed: {md_path} - {e}")
            return False, []
    return True, changes
def main():
    """Command-line entry point: scan the vault and convert all WikiLinks.

    Runs in preview mode by default; pass --execute to write changes.
    """
    parser = argparse.ArgumentParser(
        description='Convert Obsidian WikiLinks to standard Markdown links',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python convert_wikilinks.py            # preview changes (default)
  python convert_wikilinks.py --execute  # apply changes

Conversion rules:
  ![[./attachments/img.png]] -> ![](./attachments/img.png)
  ![[./attachments/img.png|alt text]] -> ![alt text](./attachments/img.png)
  ![[./attachments/img.png|100x200]] -> <img src=".../img.png" width="100" height="200" alt="">
  [[svn://192.168.1.98/path]] -> [svn://192.168.1.98/path](svn://192.168.1.98/path)
  [[url|display text]] -> [display text](url)
  [[Note]] -> [Note](relative/path/Note.md)
"""
    )
    parser.add_argument('--execute', action='store_true',
                        help='execute mode (default is preview)')
    args = parser.parse_args()
    dry_run = not args.execute
    # Use the parent of the script directory as the vault base path.
    base_path = Path(__file__).parent.parent.resolve()
    print("=" * 70)
    if dry_run:
        print("π Preview mode - no files will be modified")
    else:
        print("β‘ Execute mode - files will be modified")
    print(f"π Scan directory: {base_path}")
    print("=" * 70)
    # Find and process Markdown files.
    print("\nπ Scan Markdown files...")
    print("-" * 50)
    md_files, all_files = scan_files(base_path)
    print(f"Found {len(md_files)} .md files\n")
    # Build both indexes once; every file resolution reuses them.
    md_index = build_md_index(md_files, base_path)
    file_index = build_file_index(all_files, base_path)
    updated_files = 0
    total_image_count = 0
    total_link_count = 0
    for md_path in md_files:
        updated, changes = process_md_file(md_path, base_path, md_index, file_index, dry_run)
        if updated:
            updated_files += 1
            relative_path = md_path.relative_to(base_path)
            image_changes = [c for c in changes if c['type'] == 'image']
            link_changes = [c for c in changes if c['type'] == 'link']
            total_image_count += len(image_changes)
            total_link_count += len(link_changes)
            print(f"β {relative_path}")
            print(f" πΈ Images: {len(image_changes)}, π Links: {len(link_changes)}")
            # Show detailed changes (up to 5 entries).
            for i, change in enumerate(changes[:5]):
                icon = 'πΈ' if change['type'] == 'image' else 'π'
                orig = change['original']
                conv = change['converted']
                print(f" {icon} {orig}")
                print(f" β {conv}")
            if len(changes) > 5:
                print(f" ... {len(changes) - 5} more changes")
            print()
    # Summary.
    print("=" * 70)
    if dry_run:
        print("π Preview complete!")
        print(f" - {updated_files} files will be updated")
        print(f" - {total_image_count} image WikiLinks")
        print(f" - {total_link_count} link WikiLinks")
        if updated_files > 0:
            print("\nπ‘ If everything looks good, run this to apply changes:")
            print(" python convert_wikilinks.py --execute")
    else:
        print("β Execution complete!")
        print(f" - {updated_files} files updated")
        print(f" - {total_image_count} image WikiLinks converted")
        print(f" - {total_link_count} link WikiLinks converted")
    print("=" * 70)
if __name__ == '__main__':
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment