Fix Hamiltonian analysis and add DCA stay count metrics
- Save graph_path to output for accurate Hamiltonian tracking
- DCA analysis now shows avg/max voice stay counts
- Fix: use actual graph node hashes instead of rehashing transposed chords
This commit is contained in:
parent
34a6ebfabd
commit
559c868313
|
|
@ -8,6 +8,7 @@ from pathlib import Path
|
|||
def analyze_chords(
|
||||
chords: list,
|
||||
config: dict | None = None,
|
||||
graph_path: list | None = None,
|
||||
) -> dict:
|
||||
"""Analyze chord sequence and return metrics.
|
||||
|
||||
|
|
@ -18,6 +19,7 @@ def analyze_chords(
|
|||
- melodic_threshold_max: max cents per voice movement (default: 300)
|
||||
- max_path: path length (default: 50)
|
||||
- graph_nodes: total nodes in graph (optional, for Hamiltonian coverage)
|
||||
graph_path: Optional list of graph node hashes for Hamiltonian analysis
|
||||
|
||||
Returns:
|
||||
Dict with analysis metrics
|
||||
|
|
@ -44,9 +46,11 @@ def analyze_chords(
|
|||
# ========== Contrary Motion ==========
|
||||
contrary_motion_steps = 0
|
||||
|
||||
# ========== DCA (Voice Changes) ==========
|
||||
voice_changes_per_step = []
|
||||
all_voices_change_count = 0
|
||||
# ========== DCA (Voice Stay Counts) ==========
|
||||
# Track how long each voice stays before changing
|
||||
voice_stay_counts = [0] * num_voices # Current stay count per voice
|
||||
stay_counts_when_changed = [] # All stay counts recorded when voices changed
|
||||
max_voice_stay = 0
|
||||
|
||||
# ========== Hamiltonian ==========
|
||||
unique_nodes = set()
|
||||
|
|
@ -87,10 +91,17 @@ def analyze_chords(
|
|||
if sorted_diffs[0] < 0 and sorted_diffs[-1] > 0:
|
||||
contrary_motion_steps += 1
|
||||
|
||||
# DCA: all voices change
|
||||
voice_changes_per_step.append(voices_changed)
|
||||
if voices_changed == num_voices:
|
||||
all_voices_change_count += 1
|
||||
# DCA: Track stay counts per voice
|
||||
for v in range(num_voices):
|
||||
curr_cents = chords[i][v]["cents"]
|
||||
prev_cents = chords[i - 1][v]["cents"]
|
||||
if curr_cents != prev_cents:
|
||||
# Voice changed - record how long it stayed
|
||||
stay_counts_when_changed.append(voice_stay_counts[v])
|
||||
max_voice_stay = max(max_voice_stay, voice_stay_counts[v])
|
||||
voice_stay_counts[v] = 0 # Reset stay count
|
||||
else:
|
||||
voice_stay_counts[v] += 1 # Increment stay count
|
||||
|
||||
# ========== Target Range ==========
|
||||
target_cents = target_octaves * 1200
|
||||
|
|
@ -104,20 +115,21 @@ def analyze_chords(
|
|||
start_avg = end_avg = actual_cents = target_percent = 0
|
||||
|
||||
# ========== DCA Summary ==========
|
||||
avg_voice_changes = (
|
||||
sum(voice_changes_per_step) / len(voice_changes_per_step)
|
||||
if voice_changes_per_step
|
||||
else 0
|
||||
)
|
||||
pct_all_change = (
|
||||
(all_voices_change_count / len(voice_changes_per_step)) * 100
|
||||
if voice_changes_per_step
|
||||
avg_voice_stay = (
|
||||
sum(stay_counts_when_changed) / len(stay_counts_when_changed)
|
||||
if stay_counts_when_changed
|
||||
else 0
|
||||
)
|
||||
|
||||
# ========== Hamiltonian Coverage ==========
|
||||
# Use graph_path if provided (accurate), otherwise hash output chords (may differ due to transposition)
|
||||
if graph_path:
|
||||
hamiltonian_unique_nodes = len(set(graph_path))
|
||||
else:
|
||||
hamiltonian_unique_nodes = len(unique_nodes)
|
||||
|
||||
hamiltonian_coverage = (
|
||||
(len(unique_nodes) / graph_nodes * 100) if graph_nodes else None
|
||||
(hamiltonian_unique_nodes / graph_nodes * 100) if graph_nodes else None
|
||||
)
|
||||
|
||||
return {
|
||||
|
|
@ -136,11 +148,10 @@ def analyze_chords(
|
|||
(contrary_motion_steps / num_steps * 100) if num_steps > 0 else 0
|
||||
),
|
||||
# DCA
|
||||
"dca_avg_voice_changes": avg_voice_changes,
|
||||
"dca_all_voices_change_count": all_voices_change_count,
|
||||
"dca_all_voices_change_percent": pct_all_change,
|
||||
"dca_avg_voice_stay": avg_voice_stay,
|
||||
"dca_max_voice_stay": max_voice_stay,
|
||||
# Hamiltonian
|
||||
"hamiltonian_unique_nodes": len(unique_nodes),
|
||||
"hamiltonian_unique_nodes": hamiltonian_unique_nodes,
|
||||
"hamiltonian_coverage": hamiltonian_coverage,
|
||||
# Target Range
|
||||
"target_octaves": target_octaves,
|
||||
|
|
@ -169,9 +180,9 @@ def format_analysis(metrics: dict) -> str:
|
|||
f"Steps with contrary: {metrics['contrary_motion_steps']}",
|
||||
f"Percentage: {metrics['contrary_motion_percent']:.1f}%",
|
||||
"",
|
||||
"--- DCA (Voice Changes) ---",
|
||||
f"Avg voices changing: {metrics['dca_avg_voice_changes']:.2f} / {metrics['num_voices']}",
|
||||
f"All voices change: {metrics['dca_all_voices_change_count']} steps ({metrics['dca_all_voices_change_percent']:.1f}%)",
|
||||
"--- DCA (Voice Stay) ---",
|
||||
f"Avg stay count: {metrics['dca_avg_voice_stay']:.2f} steps",
|
||||
f"Max stay count: {metrics['dca_max_voice_stay']} steps",
|
||||
"",
|
||||
"--- Hamiltonian ---",
|
||||
f"Unique nodes: {metrics['hamiltonian_unique_nodes']}",
|
||||
|
|
@ -196,9 +207,18 @@ def format_analysis(metrics: dict) -> str:
|
|||
|
||||
def analyze_file(file_path: str | Path, config: dict | None = None) -> dict:
|
||||
"""Load and analyze a chord file."""
|
||||
file_path = Path(file_path)
|
||||
with open(file_path) as f:
|
||||
chords = json.load(f)
|
||||
return analyze_chords(chords, config)
|
||||
|
||||
# Try to load graph_path if it exists
|
||||
graph_path = None
|
||||
graph_path_file = file_path.parent / "graph_path.json"
|
||||
if graph_path_file.exists():
|
||||
with open(graph_path_file) as f:
|
||||
graph_path = json.load(f)
|
||||
|
||||
return analyze_chords(chords, config, graph_path)
|
||||
|
||||
|
||||
def main():
|
||||
|
|
|
|||
14
src/graph.py
14
src/graph.py
|
|
@ -20,14 +20,20 @@ class PathFinder:
|
|||
start_chord: "Chord | None" = None,
|
||||
max_length: int = 100,
|
||||
weights_config: dict | None = None,
|
||||
) -> list["Chord"]:
|
||||
"""Find a stochastic path through the graph."""
|
||||
) -> tuple[list["Chord"], list["Chord"]]:
|
||||
"""Find a stochastic path through the graph.
|
||||
|
||||
Returns:
|
||||
Tuple of (path, graph_path) where:
|
||||
- path: list of output Chord objects (transposed)
|
||||
- graph_path: list of original graph Chord objects (untransposed)
|
||||
"""
|
||||
if weights_config is None:
|
||||
weights_config = self._default_weights_config()
|
||||
|
||||
chord = self._initialize_chords(start_chord)
|
||||
if not chord or chord[0] is None or len(self.graph.nodes()) == 0:
|
||||
return []
|
||||
return [], []
|
||||
|
||||
original_chord = chord[0]
|
||||
graph_node = original_chord
|
||||
|
|
@ -102,7 +108,7 @@ class PathFinder:
|
|||
if len(last_graph_nodes) > 2:
|
||||
last_graph_nodes = last_graph_nodes[-2:]
|
||||
|
||||
return path
|
||||
return path, graph_path
|
||||
|
||||
def _initialize_chords(self, start_chord: "Chord | None") -> tuple:
|
||||
"""Initialize chord sequence."""
|
||||
|
|
|
|||
21
src/io.py
21
src/io.py
|
|
@ -448,7 +448,7 @@ def main():
|
|||
|
||||
weights_config["max_path"] = args.max_path
|
||||
|
||||
path = path_finder.find_stochastic_path(
|
||||
path, graph_path = path_finder.find_stochastic_path(
|
||||
max_length=args.max_path, weights_config=weights_config
|
||||
)
|
||||
print(f"Path length: {len(path)}")
|
||||
|
|
@ -458,6 +458,15 @@ def main():
|
|||
|
||||
os.makedirs(args.output_dir, exist_ok=True)
|
||||
|
||||
# Save graph_path for Hamiltonian analysis
|
||||
import json
|
||||
|
||||
graph_path_data = [hash(node) for node in graph_path]
|
||||
graph_path_file = os.path.join(args.output_dir, "graph_path.json")
|
||||
with open(graph_path_file, "w") as f:
|
||||
json.dump(graph_path_data, f)
|
||||
print(f"Written to {graph_path_file}")
|
||||
|
||||
write_chord_sequence(path, os.path.join(args.output_dir, "output_chords.json"))
|
||||
print(f"Written to {args.output_dir}/output_chords.json")
|
||||
|
||||
|
|
@ -487,7 +496,15 @@ def main():
|
|||
chords_file = os.path.join(args.output_dir, "output_chords.json")
|
||||
with open(chords_file) as f:
|
||||
chords = json.load(f)
|
||||
metrics = analyze_chords(chords, config)
|
||||
|
||||
# Load graph_path for Hamiltonian analysis
|
||||
graph_path_file = os.path.join(args.output_dir, "graph_path.json")
|
||||
graph_path = None
|
||||
if os.path.exists(graph_path_file):
|
||||
with open(graph_path_file) as f:
|
||||
graph_path = json.load(f)
|
||||
|
||||
metrics = analyze_chords(chords, config, graph_path)
|
||||
print()
|
||||
print(format_analysis(metrics))
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue