{
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"id": "806f6f69-1e0b-4d34-aac9-695c8531cdb1",
"metadata": {},
"outputs": [],
"source": [
"from itertools import chain, combinations, permutations, product\n",
"from math import prod, log\n",
"from copy import deepcopy\n",
"import networkx as nx\n",
"from fractions import Fraction\n",
"import json\n",
"from operator import add\n",
"\n",
"def hs_array_to_fr(hs_array):\n",
" return prod([pow(dims[d], hs_array[d]) for d in range(len(dims))])\n",
"\n",
"def hs_array_to_cents(hs_array):\n",
" return (1200 * log(hs_array_to_fr(hs_array), 2))\n",
"\n",
"def expand_pitch(hs_array):\n",
" expanded_pitch = list(hs_array)\n",
" frequency_ratio = hs_array_to_fr(hs_array)\n",
" if frequency_ratio < 1:\n",
" while frequency_ratio < 1:\n",
" frequency_ratio *= 2\n",
" expanded_pitch[0] += 1\n",
" elif frequency_ratio >= 2:\n",
" while frequency_ratio >= 2:\n",
" frequency_ratio *= 1/2\n",
" expanded_pitch[0] += -1\n",
" return tuple(expanded_pitch)\n",
"\n",
"def expand_chord(chord):\n",
" return tuple(expand_pitch(p) for p in chord)\n",
"\n",
"def collapse_pitch(hs_array):\n",
" collapsed_pitch = list(hs_array)\n",
" collapsed_pitch[0] = 0\n",
" return tuple(collapsed_pitch)\n",
"\n",
"def collapse_chord(chord):\n",
" return tuple(collapse_pitch(p) for p in chord)\n",
"\n",
"def transpose_pitch(pitch, trans):\n",
" return tuple(map(add, pitch, trans))\n",
"\n",
"def transpose_chord(chord, trans):\n",
" return tuple(transpose_pitch(p, trans) for p in chord)\n",
"\n",
"def cent_difference(hs_array1, hs_array2):\n",
" return hs_array_to_cents(hs_array2) - hs_array_to_cents(hs_array1)\n",
"\n",
"def pitch_difference(hs_array1, hs_array2):\n",
" return transpose_pitch(hs_array1, [p * -1 for p in hs_array2])\n",
"\n",
"# this is modified for different chord sizes like original version\n",
"def grow_chords(chord, root, min_chord_size, max_chord_size):\n",
" #this could use the tranpose_pitch function\n",
" branches = [branch for alt in [-1, 1] for d in range(1, len(root)) if (branch:=(*(r:=root)[:d], r[d] + alt, *r[(d + 1):])) not in chord]\n",
" subsets = chain.from_iterable(combinations(branches, r) for r in range(1, max_chord_size - len(chord) + 1))\n",
" for subset in subsets:\n",
" extended_chord = chord + subset\n",
" if(len(extended_chord) < max_chord_size):\n",
" for branch in subset:\n",
" yield from grow_chords(extended_chord, branch, min_chord_size, max_chord_size)\n",
" if(len(extended_chord) >= min_chord_size):\n",
" yield tuple(sorted(extended_chord, key=hs_array_to_fr))\n",
"\n",
"def chords(chord, root, min_chord_size, max_chord_size):\n",
" # this will filter out the 4x dups of paths that are loops, there might be a faster way to test this\n",
" return set(grow_chords(chord, root, min_chord_size, max_chord_size))\n",
"\n",
"def reverse_movements(movements):\n",
" return {value['destination']:{'destination':key, 'cent_difference':value['cent_difference']} for key, value in movements.items()}\n",
"\n",
"def is_directly_tunable(intersection, diff):\n",
" return max([len(collapse_pitch(pitch_difference(d, set(list(intersection)[0])))) for d in diff]) == 1\n",
"\n",
"def edge_data(chords, min_symdiff, max_symdiff, max_chord_size):\n",
" [expanded_base, expanded_comp] = [expand_chord(chord) for chord in chords]\n",
" edges = []\n",
" transpositions = set(pitch_difference(pair[0], pair[1]) for pair in set(product(expanded_base, expanded_comp)))\n",
" for trans in transpositions:\n",
" expanded_comp_transposed = transpose_chord(expanded_comp, trans)\n",
" intersection = set(expanded_base) & set(expanded_comp_transposed)\n",
" symdiff_len = sum([len(chord) - len(intersection) for chord in [expanded_base, expanded_comp_transposed]])\n",
" if (min_symdiff <= symdiff_len <= max_symdiff):\n",
" rev_trans = tuple(t * -1 for t in trans)\n",
" [diff1, diff2] = [list(set(chord) - intersection) for chord in [expanded_base, expanded_comp_transposed]]\n",
" base_map = {val: {'destination':transpose_pitch(val, rev_trans), 'cent_difference': 0} for val in intersection}\n",
" base_map_rev = reverse_movements(base_map)\n",
" tunability = is_directly_tunable(intersection, diff2)\n",
" maps = []\n",
" diff1 += [None] * (max_chord_size - len(diff1) - len(intersection))\n",
" perms = [list(perm) + [None] * (max_chord_size - len(perm) - len(intersection)) for perm in set(permutations(diff2))]\n",
" for p in perms:\n",
" appended_map = {\n",
" diff1[index]:\n",
" {\n",
" 'destination': transpose_pitch(val, rev_trans) if val != None else None, \n",
" 'cent_difference': cent_difference(diff1[index], val) if None not in [diff1[index], val] else None\n",
" } for index, val in enumerate(p)}\n",
" edges.append((tuple(expanded_base), tuple(expanded_comp), {\n",
" 'transposition': trans,\n",
" 'symmetric_difference': symdiff_len, \n",
" 'is_directly_tunable': tunability,\n",
" 'movements': base_map | appended_map\n",
" }))\n",
" edges.append((tuple(expanded_comp), tuple(expanded_base), {\n",
" 'transposition': rev_trans,\n",
" 'symmetric_difference': symdiff_len, \n",
" 'is_directly_tunable': tunability,\n",
" 'movements': base_map_rev | reverse_movements(appended_map)\n",
" }))\n",
" return edges if edges != [] else None\n",
"\n",
"# this is very slow, I have an idea in mind that my be faster by simply growing the chords to max_chord_size + max_sim_diff\n",
"# technically at that point you have generated both chords and can get the second chord from the first\n",
"def edges(chords, min_symdiff, max_symdiff, max_chord_size): \n",
" return list(chain(*[e for c in combinations(chords, 2) if (e := edge_data(c, min_symdiff, max_symdiff, max_chord_size)) is not None]))\n",
"\n",
"def graph_from_edges(edges):\n",
" g = nx.MultiDiGraph()\n",
" g.add_edges_from(edges)\n",
" return g\n",
"\n",
"def generate_graph(chord_set, min_symdiff, max_symdiff, max_chord_size):\n",
" #chord_set = chords(pitch_set, min_chord_size, max_chord_size)\n",
" edge_set = edges(chord_set, min_symdiff, max_symdiff, max_chord_size)\n",
" res_graph = graph_from_edges(edge_set)\n",
" return res_graph\n",
"\n",
"def display_graph(graph):\n",
" show_graph = nx.Graph(graph)\n",
" pos = nx.draw_spring(show_graph, node_size=5, width=0.1)\n",
" plt.figure(1, figsize=(12,12)) \n",
" nx.draw(show_graph, pos, node_size=5, width=0.1)\n",
" plt.show()\n",
" #plt.savefig('compact_sets.png', dpi=150)\n",
"\n",
"def reconcile_path(path):\n",
" reconciled_path = [[tuple(0 for d in dims), sorted([p for p in list(path[0][2]['movements'].keys())], key=hs_array_to_fr)]] \n",
" #print(reconciled_path)\n",
" for cdx in range(len(path)-1):\n",
" movements = path[cdx][2]['movements']\n",
" next_chord = [movements[p]['destination'] for p in reconciled_path[-1][1]]\n",
" trans = path[cdx][2]['transposition']\n",
" reconciled_path.append([trans, next_chord])\n",
" return reconciled_path\n",
"\n",
"def path_to_chords(path):\n",
" current_root = Fraction(1, 1)\n",
" chords = []\n",
" for trans, points in path:\n",
" #print(trans)\n",
" current_root = current_root * hs_array_to_fr(trans)\n",
" chord = [float(current_root * hs_array_to_fr(p)) if p is not None else None for p in points]\n",
" chords.append(chord)\n",
" return chords\n",
"\n",
"def write_chord_sequence(path):\n",
" file = open(\"seq.txt\", \"w+\")\n",
" content = json.dumps(path)\n",
" content = content.replace(\", \\\"\", \",\\n\\t\\\"\")\n",
" file.write(content)\n",
" file.close()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "4e3ef738-7f64-47c3-9129-0450fd031375",
"metadata": {},
"outputs": [],
"source": [
"dims = (2, 3, 5, 7, 11)\n",
"root = (0, 0, 0, 0, 0)\n",
"chord = (root,)\n",
"#%timeit chords(chord, root, 4, 4)\n",
"#print(len(chord_set))\n",
"chord_set = chords(chord, root, 3, 3)\n",
"#edge_set = edges(chord_set, 2, 2, 3)\n",
"#edge_set\n",
"#%timeit edges(chord_set, 2, 2, 4)\n",
"#print(len(edge_set))\n",
"graph = generate_graph(chord_set, 4, 4, 3)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "aea5215c-8551-4685-b761-11c2dc74cf22",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"131"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from random import choice, choices\n",
"\n",
"def stochastic_hamiltonian(graph):\n",
" \n",
" def movement_size_weights(edges):\n",
" \n",
" def max_cent_diff(edge):\n",
" res = max([abs(v) for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None])\n",
" return res\n",
" \n",
" def min_cent_diff(edge):\n",
" res = [abs(v) for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None]\n",
" res.remove(0)\n",
" return min(res)\n",
" \n",
" return [(1000 if ((max_cent_diff(e) < 200) and (min_cent_diff(e)) > 50) else 1) for e in edges]\n",
"\n",
" \n",
" def hamiltonian_weights(edges):\n",
" return [(10 if e[1] not in [path_edge[0] for path_edge in path] else 1) for e in edges] \n",
"\n",
" \n",
" def contrary_motion_weights(edges):\n",
"\n",
" def is_contrary(edge):\n",
" cent_diffs = [v for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None]\n",
" cent_diffs.sort()\n",
" return (cent_diffs[0] < 0) and (cent_diffs[1] == 0) and (cent_diffs[2] > 0)\n",
" \n",
" return [(10 if is_contrary(e) else 1) for e in edges]\n",
"\n",
" \n",
" def is_directly_tunable_weights(edges):\n",
" return [(10 if e[2]['is_directly_tunable'] else 1) for e in edges]\n",
"\n",
" \n",
" def voice_crossing_weights(edges):\n",
" \n",
" def has_voice_crossing(edge):\n",
" source = list(edge[0])\n",
" ordered_source = sorted(source, key=hs_array_to_fr) \n",
" source_order = [ordered_source.index(p) for p in source]\n",
" destination = [transpose_pitch(edge[2]['movements'][p]['destination'], edge[2]['transposition']) for p in source]\n",
" ordered_destination = sorted(destination, key=hs_array_to_fr)\n",
" destination_order = [ordered_destination.index(p) for p in destination]\n",
" #print(source_order != destination_order)\n",
" return source_order != destination_order\n",
" \n",
" return [(10 if not has_voice_crossing(e) else 0) for e in edges]\n",
" \n",
" \n",
" check_graph = graph.copy()\n",
" next_node = choice(list(graph.nodes()))\n",
" check_graph.remove_node(next_node)\n",
" path = []\n",
" while (nx.number_of_nodes(check_graph) > 0) and (len(path) < 5000):\n",
" out_edges = list(graph.out_edges(next_node, data=True))\n",
" #print([l for l in zip(movement_size_weights(out_edges), hamiltonian_weights(out_edges))])\n",
" factors = [\n",
" movement_size_weights(out_edges), \n",
" hamiltonian_weights(out_edges), \n",
" contrary_motion_weights(out_edges), \n",
" is_directly_tunable_weights(out_edges),\n",
" voice_crossing_weights(out_edges)\n",
" ]\n",
" weights = [prod(a) for a in zip(*factors)]\n",
" #weights = [reduce(mul, x) for x in [movement_size_weights(out_edges), hamiltonian_weights(out_edges)]]\n",
" #print(weights)\n",
" edge = choices(out_edges, weights=weights)[0]\n",
" #edge = random.choice(out_edges)\n",
" next_node = edge[1]\n",
" path.append(edge)\n",
" if next_node in check_graph.nodes:\n",
" check_graph.remove_node(next_node)\n",
" return path\n",
" \n",
"stochastic_ham = stochastic_hamiltonian(graph)\n",
"path = reconcile_path(stochastic_ham)\n",
"write_chord_sequence(path_to_chords(path))\n",
"len(path)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "7f2d356f-6377-46cf-bbb1-32111be90f4f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The line_profiler extension is already loaded. To reload it, use:\n",
" %reload_ext line_profiler\n"
]
}
],
"source": [
"%load_ext line_profiler"
]
},
{
"cell_type": "code",
"execution_count": 134,
"id": "7f141bf5-fdcb-4c01-a10b-3e86d3d1a7b4",
"metadata": {},
"outputs": [],
"source": [
"chord_set = chords(chord, root, 3, 3)"
]
},
{
"cell_type": "code",
"execution_count": 136,
"id": "88850b8c-a743-44d0-b863-7cd9066690d9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Timer unit: 1e-09 s\n",
"\n",
"Total time: 0.112228 s\n",
"File: /tmp/ipykernel_515812/2679284550.py\n",
"Function: edge_data at line 74\n",
"\n",
"Line # Hits Time Per Hit % Time Line Contents\n",
"==============================================================\n",
" 74 def edge_data(chords, min_symdiff, max_symdiff, max_chord_size):\n",
" 75 990 29603044.0 29902.1 26.4 [expanded_base, expanded_comp] = [expand_chord(chord) for chord in chords]\n",
" 76 990 229527.0 231.8 0.2 edges = []\n",
" 77 990 23648371.0 23887.2 21.1 transpositions = set(pitch_difference(pair[0], pair[1]) for pair in set(product(expanded_base, expanded_comp)))\n",
" 78 9193 2315267.0 251.9 2.1 for trans in transpositions:\n",
" 79 8203 33386775.0 4070.1 29.7 expanded_comp_transposed = transpose_chord(expanded_comp, trans)\n",
" 80 8203 8393773.0 1023.3 7.5 intersection = set(expanded_base) & set(expanded_comp_transposed)\n",
" 81 8203 11812057.0 1440.0 10.5 symdiff_len = sum([len(chord) - len(intersection) for chord in [expanded_base, expanded_comp_transposed]])\n",
" 82 8203 2530596.0 308.5 2.3 if (min_symdiff <= symdiff_len <= max_symdiff):\n",
" 83 rev_trans = tuple(t * -1 for t in trans)\n",
" 84 [diff1, diff2] = [list(set(chord) - intersection) for chord in [expanded_base, expanded_comp_transposed]]\n",
" 85 base_map = {val: {'destination':transpose_pitch(val, rev_trans), 'cent_difference': 0} for val in intersection}\n",
" 86 base_map_rev = reverse_movements(base_map)\n",
" 87 tunability = is_directly_tunable(intersection, diff2)\n",
" 88 maps = []\n",
" 89 diff1 += [None] * (max_chord_size - len(diff1) - len(intersection))\n",
" 90 perms = [list(perm) + [None] * (max_chord_size - len(perm) - len(intersection)) for perm in set(permutations(diff2))]\n",
" 91 for p in perms:\n",
" 92 appended_map = {\n",
" 93 diff1[index]:\n",
" 94 {\n",
" 95 'destination': transpose_pitch(val, rev_trans) if val != None else None, \n",
" 96 'cent_difference': cent_difference(diff1[index], val) if None not in [diff1[index], val] else None\n",
" 97 } for index, val in enumerate(p)}\n",
" 98 edges.append((tuple(expanded_base), tuple(expanded_comp), {\n",
" 99 'transposition': trans,\n",
" 100 'symmetric_difference': symdiff_len, \n",
" 101 'is_directly_tunable': tunability,\n",
" 102 'movements': base_map | appended_map\n",
" 103 }))\n",
" 104 edges.append((tuple(expanded_comp), tuple(expanded_base), {\n",
" 105 'transposition': rev_trans,\n",
" 106 'symmetric_difference': symdiff_len, \n",
" 107 'is_directly_tunable': tunability,\n",
" 108 'movements': base_map_rev | reverse_movements(appended_map)\n",
" 109 }))\n",
" 110 990 308812.0 311.9 0.3 return edges if edges != [] else None"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"lprun -f edge_data edges(chord_set, 3, 3, 4)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}