Day 12: Passage Pathing

Please post your solutions in your own top-level comment. Here's a template you can copy-paste into your comment to format it nicely, with the code collapsed by default inside an expandable section with syntax highlighting (you can replace python with any of the "short names" listed in this page of supported languages):

<details>
<summary>Part 1</summary>

```python
```

</details>

1. Crestwave
This was a neat maze. It took a while to grok the problem, but I quite liked part 1. Part 2 takes 14 seconds to run on my machine but it has minimal changes from part 1, so eh. Part 1...

This was a neat maze. It took a while to grok the problem, but I quite liked part 1. Part 2 takes 14 seconds to run on my machine but it has minimal changes from part 1, so eh.

Part 1
#!/usr/bin/awk -f
# Recursive DFS from `node`, accumulating the number of finished paths in the
# global `total`. `map` stores adjacency as map[name] = neighbour count and
# map[name, i] = i-th neighbour; `path` holds the caves visited so far
# (indices 1..ptr). Names after the tab in the parameter list are AWK-style
# locals, not real arguments.
function walk(map, path, ptr, node,	i, j, visited, _path, _node) {
path[++ptr] = node

if (node == "end") {
total += 1
return
}

for (i = 1; i <= map[node]; ++i) {
_node = map[node, i]

visited = 0

# Small caves (all-lowercase names) may appear at most once per path.
if (tolower(_node) == _node)
for (j in path)
if (path[j] == _node)
visited = 1

if (! visited) {
# Copy the path before recursing: AWK passes arrays by reference,
# so sibling branches must not share one path array.
for (j in path)
_path[j] = path[j]

walk(map, _path, ptr, _node)
}
}
}

BEGIN { FS = "-" }

# Record each input edge in both directions; map[name] counts the neighbours
# of `name` and map[name, i] is its i-th neighbour.
# BUG FIX: the posted code had "\$1"/"\$2" (a formatting-escape artifact),
# which is a syntax error in awk; restored to plain field references.
{
map[$1, ++map[$1]] = $2
map[$2, ++map[$2]] = $1
}

END {
walk(map, path, ptr, "start")
print total
}

Part 2
#!/usr/bin/awk -f
# Part-2 DFS: identical to part 1 except `time` records whether the single
# allowed second visit to a small cave has already been spent on this path.
function walk(map, path, ptr, node, time,	i, j, visited, _path, _node, _time) {
path[++ptr] = node

if (node == "end") {
total += 1
return
}

for (i = 1; i <= map[node]; ++i) {
_node = map[node, i]

visited = 0

# Small caves (all-lowercase names) are checked against the path so far.
if (tolower(_node) == _node)
for (j in path)
if (path[j] == _node)
visited = 1

# "start" may never be re-entered.
if (_node == "start")
continue

_time = time

# If this small cave was already visited but the double-visit budget is
# unspent, allow the visit and spend the budget for the subtree.
if (visited == 1 && ! time) {
visited = 0
_time = 1
}

if (! visited) {
# Copy the path: AWK arrays are passed by reference.
for (j in path)
_path[j] = path[j]

walk(map, _path, ptr, _node, _time)
}
}
}

BEGIN { FS = "-" }

# Record each input edge in both directions (see part 1).
# BUG FIX: the posted code had "\$1"/"\$2" (a formatting-escape artifact),
# which is a syntax error in awk; restored to plain field references.
{
map[$1, ++map[$1]] = $2
map[$2, ++map[$2]] = $1
}

END {
walk(map, path, ptr, "start", 0)
print total
}

2. Crespyl
This is one of those recursive problems that can sometimes get me into trouble in languages like Ruby that like to pass collections by mutable references. I rarely want recursive calls to be able...

This is one of those recursive problems that can sometimes get me into trouble in languages like Ruby that like to pass collections by mutable references. I rarely want recursive calls to be able to mutate lists/sets that are still being used further up the stack, but in Ruby you have to be careful to call .clone to ensure that won't happen. I always forget this at least once during AoC, and it always causes me some headaches.

Part 1 Ruby
# Parse "a-b" edge lines into a bidirectional adjacency Hash of Sets, then
# count every start→end path under the part-1 visiting rule.
def compute_p1(input)
  connections = Hash.new { Set.new }

  input.lines.map(&:chomp).map { |l| l.split('-') }.each do |a, b|
    connections[a] += [b]
    connections[b] += [a]
  end

  count_paths_p1(connections, 'start')
end

# Count distinct paths from `start` to 'end' in which each small (lowercase)
# cave is entered at most once. `visited_small_caves` holds the small caves
# already used on the current path; new sets are built immutably with `+`
# and `-`, so the caller's set is never mutated.
def count_paths_p1(connections, start, visited_small_caves = Set.new)
  return 1 if start == 'end'

  reachable = connections[start] - visited_small_caves
  return 0 if reachable.empty?

  blocked =
    if start.match?(/[[:lower:]]/)
      visited_small_caves + [start]
    else
      visited_small_caves
    end

  reachable.sum { |cave| count_paths_p1(connections, cave, blocked) }
end

Part 2 Ruby

This set-oriented approach I tend to lean towards is easy to reason about, but performance in this case is definitely on the lower end (~2.5s on my machine). Alternatives could be tracking the current path and counting occurrences, or just keeping a flag for whether we've passed any small cave twice so far. In that vein, I think it'd also probably be cleaner to combine the idea of the visit_counts and closed_caves into just having a set of visited small caves and the flag.

# Parse "a-b" edge lines into a bidirectional adjacency Hash of Sets, then
# count every start→end path under the part-2 rule. 'start' is pre-closed so
# no path can ever re-enter it.
def compute_p2(input)
  connections = Hash.new { Set.new }

  input.lines.map(&:chomp).map { |l| l.split('-') }.each do |a, b|
    connections[a] += [b]
    connections[b] += [a]
  end

  count_paths_p2(connections, 'start', Set.new(['start']))
end

# Part-2 path counter: at most one small cave per path may be entered twice.
# `visit_counts` tracks how often each small cave has been entered; once any
# small cave reaches two visits, every small cave seen so far is closed.
# Both accumulators are cloned at each recursion so sibling branches do not
# observe each other's bookkeeping.
def count_paths_p2(connections, node, closed_caves = Set.new, visit_counts = Hash.new(0))
  return 1 if node == 'end'

  if node.match?(/[[:lower:]]/)
    visit_counts[node] += 1
    if visit_counts[node] >= 2
      # The double visit is now spent: close all small caves visited so far.
      closed_caves += visit_counts.keys
    elsif visit_counts.values.include?(2)
      # Someone else already doubled; this cave gets only the one visit.
      closed_caves += [node]
    end
  end

  frontier = connections[node] - closed_caves
  return 0 if frontier.empty?

  frontier.sum do |nxt|
    count_paths_p2(connections, nxt, closed_caves.clone, visit_counts.clone)
  end
end

3. spit-evil-olive-tips
Part 1 more fun implementing recursive things iteratively. my pending queue this time is "partial paths that need further exploring" and I seed it with a path of just the starting cave. then, from...
Part 1

more fun implementing recursive things iteratively. my pending queue this time is "partial paths that need further exploring" and I seed it with a path of just the starting cave.

then, from the current cave (the last item of the partial path) I look up where my options are. if that option is a small cave that this path has already visited, I skip it. if that option takes me to the finish cave, I can add it to the list of completed paths. otherwise, I add it as a partial path to be explored further.

biggest forehead-smacking mistake I made was not making the edge connection bi-directional (the second write to the edges dictionary when parsing). yeah, that'll throw the results off a bit.

from collections import defaultdict, deque

def main():
    """Read the day-12 cave connections from 012.txt and print the part-1 path count."""
    with open('012.txt') as file:
        # BUG FIX: the posted code passed an undefined name `lines`;
        # read the file's lines here before handing them to the parser.
        lines = file.readlines()
        map = Map.parse(lines)

    paths = map.find_paths('start', 'end')

    print(len(paths))

class Map:
    """Undirected cave graph with an iterative path enumerator."""

    def __init__(self, edges):
        # edges: cave name -> list of neighbouring cave names
        self.edges = edges

    @classmethod
    def parse(cls, lines):
        """Build a Map from "a-b" edge lines; every edge is bidirectional."""
        adjacency = defaultdict(list)
        for raw in lines:
            left, right = raw.strip().split('-')
            adjacency[left].append(right)
            adjacency[right].append(left)
        return cls(adjacency)

    def find_paths(self, start, target):
        """Return all start→target paths that never revisit a small cave.

        Iterative BFS over partial paths: pop a partial path, extend it by
        each neighbour of its last cave, and either record it (reached the
        target) or queue it for further exploration.
        """
        completed = []
        frontier = deque([[start]])

        while frontier:
            partial = frontier.popleft()
            for nxt in self.edges[partial[-1]]:
                # Small caves are all-lowercase, so this blocks only their
                # revisits; big caves never equal their lowercased name here.
                if nxt.lower() in partial:
                    continue

                extended = partial + [nxt]
                if nxt == target:
                    completed.append(extended)
                else:
                    frontier.append(extended)

        return completed

main()

Part 2

I initially tried having a separate "has this path used the double-cave option yet?" function, then realized that it's easier to track it myself because I know when I'm using it. so my pending queue now contains (partial path, has double-cave option been used yet?) tuples.

now, when I consider a small cave that I've already visited, I'll add a path using it to the pending queue, but only if I haven't used my double-visit yet already. and that partial path goes along with a flag so I know subsequent iterations shouldn't try to do a second double-visit.

I also had to add an explicit check never to go back to the start cave. for part 1 that was implicit because the start is a small cave, but needs to be handled explicitly now otherwise it would generate paths that use the start cave as their double-cave.

@@ -26,22 +26,29 @@

def find_paths(self, start, target):
pending = deque()
-        pending.append([start])
+        pending.append(([start], False))
paths = []

while pending:
-            current = pending.popleft()
+            current, used_doublecave = pending.popleft()
options = self.edges[current[-1]]
+
for option in options:
path = current + [option]

-                if option.lower() in current:
+                if option == 'start':
continue

-                if option == target:
-                    paths.append(path)
+                if option.lower() in current:
+                    if used_doublecave:
+                        continue
+                    else:
+                        pending.append((path, True))
else:
-                    pending.append(path)
+                    if option == target:
+                        paths.append(path)
+                    else:
+                        pending.append((path, used_doublecave))

return paths

4. primordial-soup
(edited )
Part 1, Python(-ish) edges = ls > fe(X.split("-") | frozenset) | set # define a recursive λ via variables rather than via Y, which seems to be necessary in # order to get speedup from @cache...
Part 1, Python(-ish)
edges = ls > fe(X.split("-") | frozenset) | set
# define a recursive λ via variables rather than via Y, which seems to be necessary in
# order to get speedup from @cache
count_paths = None
count_paths = cache(λ start, used_smalls=frozenset():
1 if start == "end" else
sum(count_paths(choice,
used_smalls | {start} if start.islower() else used_smalls)
for edge in edges if start in edge and
(choice := one(edge - {start})) not in used_smalls))
count_paths("start")

Part 2, Python(-ish)
edges = ls > fe(X.split("-") | frozenset) | set
# define a recursive λ via variables rather than via Y, which seems to be necessary in
# order to get speedup from @cache
count_paths = None
count_paths = cache(λ start, used_smalls=frozenset(), double_used=None:
1 if start == "end" else
sum(count_paths(choice,
used_smalls | {start} if start.islower() else used_smalls,
choice if choice in used_smalls else double_used)
for edge in edges if start in edge and
((choice := one(edge - {start})) not in used_smalls or
(double_used is None and choice != "start"))))
count_paths("start")

Caching gives a big speedup if you do a recursive graph search like I did. With caching, part 2 only takes ~ 105 ms on my laptop (or 210 ms if you include interpreter startup time).

5. PapaNachos
I legit stared at this problem for 15 minutes before I wrote any code. Eventually I solved part A and moved on to B. Getting a working test case for B didn't take too long, but when I went to run...

I legit stared at this problem for 15 minutes before I wrote any code. Eventually I solved part A and moved on to B. Getting a working test case for B didn't take too long, but when I went to run the real solution it just didn't stop calculating. Eventually I stopped waiting and wrote a new algorithm. I finished the new algorithm and ran it a few minutes before the old one finally ran out of RAM and locked up Python. All in all a bit of a ride.

Day 12 Part A - Python

Mapped out the connections each way and then walked through them recursively from the start. If a path ran into a duplicate lower case entry I abandoned it.

import re
from collections import defaultdict

#data = test_data_2
data = real_data
data = data.split('\n')

connections = defaultdict(list)

for row in data:
a, b, = row.split('-')
connections[a].append(b)
connections[b].append(a)

paths = []

#print(connections)

def search(start_point, path):
    """DFS from start_point, appending each completed start→end path to the
    module-level `paths`. Small (lowercase) caves may appear at most once per
    path; big (uppercase) caves may repeat. Reads the module-level
    `connections` adjacency dict."""
    for next_spot in connections[start_point]:
        if next_spot == 'end':
            paths.append(path + [next_spot])
        elif next_spot not in path or next_spot.upper() == next_spot:
            search(next_spot, path + [next_spot])

search('start', ['start'])
print(len(paths))
#for path in paths:
#    print(path)

Day 12 Part B - Python

I modified the recursive function to allow for one letter to be found twice, then I ran it for each letter and removed all the duplicate paths. It was extremely fast.

import re
from collections import defaultdict
from collections import Counter

#data = test_data
data = real_data
data = data.split('\n')

connections = defaultdict(list)

for row in data:
a, b, = row.split('-')
connections[a].append(b)
connections[b].append(a)

paths = []

lowers = []

for key in connections.keys():
if key == key.lower():
lowers.append(key)

lowers.remove('start')
lowers.remove('end')

def search(start_point, path, duplicate_letter):
    """DFS where only the small cave `duplicate_letter` may be visited twice.

    Completed paths go to the module-level `paths` (deduplicated later by the
    caller) and to `letter_paths` (the per-letter batch). Reads the
    module-level `connections` adjacency dict.
    """
    for next_spot in connections[start_point]:
        if next_spot == 'end':
            paths.append(path + [next_spot])
            letter_paths.append(path + [next_spot])
        elif next_spot not in path or next_spot.upper() == next_spot:
            # Unvisited cave, or a big cave (repeat freely).
            search(next_spot, path + [next_spot], duplicate_letter)
        elif next_spot == duplicate_letter and Counter(path)[next_spot] == 1:
            # The designated cave gets exactly one extra visit.
            search(next_spot, path + [next_spot], duplicate_letter)
for letter in lowers:
#print(letter)
letter_paths = []
search('start', ['start'], letter)
#for path in letter_paths:
#print(path)
reduced_paths = []
for path in paths:
reduced_paths.append(tuple(path))
reduced_paths = list(set(reduced_paths))
print(len(reduced_paths))
#for path in paths:
#    print(path)

Day 12 Part B - Deeply Cursed

This looks for all paths that contain 2 or less of each small cavern. Then it takes that list and removes any entries that have more than 1 lower case cavern visited twice. It worked on the test case and hit 22GB of RAM before it locked up my machine when I ran it for realsies.

As you can see from my variable naming, I knew it was cursed when I wrote it. I just didn't know HOW cursed

import re
from collections import defaultdict

#data = test_data
data = real_data
data = data.split('\n')

connections = defaultdict(list)

for row in data:
a, b, = row.split('-')
connections[a].append(b)
connections[b].append(a)

paths = []

lowers = []

for key in connections.keys():
if key == key.lower():
lowers.append(key)

#print(connections)

def search(start_point, path):
    """DFS allowing each small cave up to two visits; results are filtered for
    validity afterwards by the caller. Appends finished paths to the
    module-level `paths`; reads the module-level `connections`."""
    for next_spot in connections[start_point]:
        # Tolerate one prior occurrence of next_spot by deleting one copy
        # before the membership test below.
        relaxed = list(path)
        if next_spot in relaxed:
            relaxed.remove(next_spot)
            relaxed.append('start')  # NO DUPLICATE STARTS POINTS
        if next_spot == 'end':
            paths.append(path + [next_spot])
        elif next_spot not in relaxed or next_spot.upper() == next_spot:
            search(next_spot, path + [next_spot])

search('start', ['start'])

valid_paths = []
for path in paths:
count = defaultdict(int)
for element in path:
if element == element.lower():
count[element] = count[element] + 1
num_twos = 0
for result in count.values():
if int(result) > 1:
num_twos = num_twos + 1
if num_twos <= 1:
valid_paths.append(path)
print(len(valid_paths))

Tips
• If your code seems like it's taking too long to run, it probably is. This probably has the ability to get WAY out of control with computational complexity

• For part B, any single path may visit at most ONE small cavern twice, but across the full set of paths every small cavern is a candidate for that double visit. This means for some iterations you need to avoid taking earlier duplicate moves so that the double visit is still available later

6. DataWraith
Day 12 (Rust) Data structures This one felt like a parsing puzzle; after parsing everything, the solution is (again) a Breadth-First-Search with counting (sometimes known as 'BFS reach'), same as...

Day 12 (Rust)

Data structures

This one felt like a parsing puzzle; after parsing everything, the solution is (again) a Breadth-First-Search with counting (sometimes known as 'BFS reach'), same as day 9.

use std::collections::VecDeque;

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
enum CaveType {
Start,
Interior,
End,
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
enum CaveSize {
Large,
Small,
}

#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
pub struct Node {
cave_type: CaveType,
name: String,
size: CaveSize,
edges: Vec<usize>,
}

Parsing

This is the annoying part.

mod parse {
use nom::{
bytes::complete::tag,
character::complete::{alpha1, line_ending},
combinator::eof,
multi::separated_list1,
IResult,
};

use super::{CaveSize, CaveType, Node};

fn edge(input: &str) -> IResult<&str, (&str, &str)> {
let (input, a) = alpha1(input)?;
let (input, _) = tag("-")(input)?;
let (input, b) = alpha1(input)?;

Ok((input, (a, b)))
}

fn caves(input: &str) -> IResult<&str, Vec<(&str, &str)>> {
let (input, c) = separated_list1(line_ending, edge)(input)?;
let (input, _) = eof(input)?;

Ok((input, c))
}

fn str2node(node: &str) -> Node {
let name = node.to_string();

let size = if name.chars().next().unwrap().is_uppercase() {
CaveSize::Large
} else {
CaveSize::Small
};

let cave_type = match node {
"start" => CaveType::Start,
"end" => CaveType::End,
_ => CaveType::Interior,
};

Node {
name,
size,
cave_type,
edges: vec![],
}
}

pub fn parse(input: &str) -> Vec<Node> {
let mut nodes = Vec::new();
let cave_system = caves(input.trim()).expect("failed to parse caves").1;

for (a, b) in cave_system.iter() {
nodes.push(str2node(a));
nodes.push(str2node(b));
}

nodes.sort_unstable();
nodes.dedup();

for (a, b) in cave_system {
let a_idx = nodes.iter().position(|n| n.name == a).unwrap();
let b_idx = nodes.iter().position(|n| n.name == b).unwrap();

nodes[a_idx].edges.push(b_idx);
nodes[b_idx].edges.push(a_idx);
}

nodes
}
}

Solving

As stated above, this is again a Breadth-First-Search, though with a slightly obnoxious state transition condition.

#[derive(Clone, Debug)]
struct PartialPath {
path: Vec<usize>,
visited: Vec<bool>,
visited_twice: bool,
}

/// Counts every path from the start cave to the end cave by breadth-first
/// search over partial paths. Small caves may be entered once each; when
/// `two_visits` is set, a single small cave per path may be entered twice
/// (never the start cave).
fn count_all_paths(nodes: &Vec<Node>, two_visits: bool) -> usize {
    let mut count = 0;

    // BUG FIX: the seed was garbled in the posted code (`path: vec!,` and
    // `start.visited = true;`). Locate the start node explicitly rather than
    // assuming its index.
    let start_idx = nodes
        .iter()
        .position(|n| n.cave_type == CaveType::Start)
        .expect("cave system has no start node");

    let mut start = PartialPath {
        path: vec![start_idx],
        visited: vec![false; nodes.len()],
        visited_twice: false,
    };

    start.visited[start_idx] = true;

    let mut q = VecDeque::new();
    q.push_back(start);

    while let Some(partial_path) = q.pop_front() {
        let last_node = &nodes[partial_path.path[partial_path.path.len() - 1]];

        // We've reached the end, count and discard the path
        if last_node.cave_type == CaveType::End {
            count += 1;
            continue;
        }

        // Enqueue all possible path continuations.
        for &edge in last_node.edges.iter() {
            // This if-statement exactly encodes the rules of the puzzles
            if !partial_path.visited[edge]
                || nodes[edge].size == CaveSize::Large
                || (two_visits
                    && !partial_path.visited_twice
                    && nodes[edge].cave_type != CaveType::Start)
            {
                let mut new_path = partial_path.clone();
                new_path.visited[edge] = true;
                new_path.path.push(edge);

                if partial_path.visited[edge] && nodes[edge].size == CaveSize::Small {
                    new_path.visited_twice = true;
                }

                q.push_back(new_path);
            }
        }
    }

    count
}

// Entry point: parse the puzzle input embedded at compile time and print
// both answers (part 1: single visits only; part 2: one double visit).
fn main() {
let input = include_str!("../../input-12.txt");
let cave_system = parse::parse(input);

println!("Part I:  {}", count_all_paths(&cave_system, false));
println!("Part II:  {}", count_all_paths(&cave_system, true));
}

7. 
bhrgunatha
(edited )
Data Structure Despite naming all my data structure creation procedures graph today's is actually a graph using the old work-horse hash table mapping cave -> set of caves. Since it's undirected...
Data Structure

Despite naming all my data structure creation procedures graph today's is actually a graph using the old work-horse hash table mapping cave -> set of caves. Since it's undirected every input line "a-b" needs 2 edges - a -> b & b -> a.

(define (graph input)
(for/fold ([G (hash)])
([l input])
(match-define (list from to) (string-split l "-"))
(hash-update (hash-update G from (curryr set-add to) (set))
to

Part 1

I wrote 2 separate but nearly identical path counts when I submitted my answers but it's refactored a bit here.
Now I use a bouncer (single-visit) to stop people re-visiting the caves.

(define (part-01 input)
(count-paths (graph input) single-visit))

count-paths is a dfs that ends when the cave end is visited.
Originally I accumulated the current path and a set of all the paths discovered as the search progressed, because it seemed like necessary information — but they really aren't needed.
Now I just increment a counter accumulated over recursive calls to dfs - one recursive call for each cave that can be visited from the current cave.

(define (count-paths G visit-policy)
(define (dfs p visited paths)
(cond [(string=? p "end") (add1 paths)]
[else (for/fold ([paths paths])
([n (neighbours G p visited visit-policy)])
(dfs n (visit-cave n visited) paths))]))
(dfs "start" (visit-cave "start") 0))

(define (neighbours G p visited exclude?)
(for/list ([n (in-set (hash-ref G p (set)))]
#:unless (exclude? visited n))
n))

(define (single-visit visited n)
(and (char-lower-case? (string-ref n 0))
(set-member? visited n)))

Part 2

For part 2 instead of counting how many times a cave is visited, I visit my "special" cave (double) and hired a new bouncer.

(define (part-02 input)
(count-paths (graph input) double-visit))

(define (double-visit visited n)
(or (string=? n "start")
(and (allow-visit? n)
(set-member? visited "double")
(set-member? visited n))))

(define (visit-cave n [visited (set)])
(cond [(string=? n "start") (set-add visited n)]
[(string=? n "end") (set-add visited n)]
[(allow-visit? n)
(if (set-member? visited n)
[else visited]))

It seems obvious, but I messed up the conditions for excluding a cave for a second visit so many times it's actually funny.

Performance update:

• I check whether a cave is small or not very, very often. A cave never changes size so this is an ideal candidate to memoize. I'll always love let over lambda but there's a neat library that provides define/memo that's just so convenient. It uses eq? (object identity) for key comparison. So...
• I converted the cave names to symbols which are (usually) interned and compared with eq?. This also means the graph is a hasheq now instead of regular hash which is also speed-up. I added a single call to check for a small cave and pass the result as a variable.
• I replaced the set used to store and check visited caves with a list, path, that gets built up. I originally ditched this idea because we're counting paths and not using them. The logic I use means only small caves are added so it isn't true path either. In theory adding & membership lookup of set are both O(log n) and list is O(1) / O(n). That doesn't seem like an improvement but n is small so the practical performance of O(1) / O(n) vs 2 O(log n) turns out to be a good speed boost. It pays to measure. For part 2 the runtime is down from about 320-340ms to 60-70ms. I'm always a little jealous of the C++/Rust solutions but I'll take the relative improvement. Part 1 has a similar factor increase too, so I'm satisfied.
1. 
DataWraith
That's an awesome solution! I need to use pure functions (and higher-order functions) more... Spoiler I didn't even consider using a DFS instead of a BrFS, because in my experience it is so rarely...

That's an awesome solution! I need to use pure functions (and higher-order functions) more...

Spoiler

I didn't even consider using a DFS instead of a BrFS, because in my experience it is so rarely useful; the code difference is minor (a stack vs a queue), but for this problem, DFS brings my runtime down from 20ms to 14ms.

1. bhrgunatha
I guessed it was OK because we know there are paths and Racket has tail call optimisation. I hope my (unwitting) 6ms gift is useful later on!

I guessed it was OK because we know there are paths and Racket has tail call optimisation.

I hope my (unwitting) 6ms gift is useful later on!

8. asterisk
Python from collections import defaultdict caves = defaultdict(set) for one, two in [line.split("-") for line in open("input.txt").read().splitlines()]: caves[one].add(two) caves[two].add(one)...
Python
from collections import defaultdict

caves = defaultdict(set)
for one, two in [line.split("-") for line in open("input.txt").read().splitlines()]:
    # BUG FIX: the loop body was lost in formatting (it survives in the
    # comment preview above); record each edge in both directions.
    caves[one].add(two)
    caves[two].add(one)

# "start" must never be re-entered, so strip it from every adjacency set.
caves = {i: caves[i] - {"start"} for i in caves}

def visit(twice=True, c="start", visited=set()):
    """Count paths from cave `c` to "end" over the module-level `caves` dict.

    twice=True  -> no small cave may repeat (part 1);
    twice=False -> one small cave may still receive a second visit (part 2),
                   after which the flag flips to True for the subtree.
    The shared default set is safe here: it is only read, never mutated
    (new sets are built with `|`).
    """
    count = 0
    for cave in caves[c]:
        if cave == "end":
            count += 1
        elif cave.isupper():
            # Big caves may be revisited freely and are never tracked.
            count += visit(twice, cave, visited)
        elif cave not in visited:
            count += visit(twice, cave, visited | {cave})
        elif not twice:
            # Spend the single allowed small-cave revisit.
            count += visit(True, cave, visited | {cave})
    return count

print("Part One:", visit())
print("Part Two:", visit(False))

9. 
3d12
Like others, I also stared at this problem for many minutes before writing any code. In fact, I thought about it so long, I fell asleep and decided to tackle it today instead. I'm not great at...

Like others, I also stared at this problem for many minutes before writing any code. In fact, I thought about it so long, I fell asleep and decided to tackle it today instead.

I'm not great at recursive functions, so imagine my surprise when the problem circumvented my expectations (and the reason I parsed the upper-cased-ness of the letters into a boolean flag in part 1) which led to my business side coding kicking in, and implementing a horrible "exclusion rule" which is used both in the selection criteria, then again in the actual recursion step in part 2. Hey, at least it worked. ¯\_(ツ)_/¯

Part 1
const fs = require('fs');

let inputArr = [];

input: fileStream,
crlfDelay: Infinity
});

for await (const line of rl) {
try {
inputArr.push(line);
} catch(e) {
console.error(e);
}
}
}

// Parse "a-b" lines into an array of cave objects:
//   { name, leadsTo: [neighbour names], multiplePassThrough: bool }.
// Big caves (all-uppercase names) may be passed through repeatedly.
// BUG FIXES vs. the posted code: the regex capture indices (found[1],
// found[2]) and the else-branch bodies (which append to an existing cave's
// leadsTo list) were lost in formatting; both restored. The per-line debug
// console.log was removed.
function parseCaves(input) {
  let cavesArr = [];
  for (const line of input) {
    let lineRegex = /(\w+)-(\w+)/;
    let found = line.match(lineRegex);
    let dest1 = found[1];
    let dest2 = found[2];
    let findDest1 = cavesArr.filter(e => e.name === dest1);
    if (findDest1.length === 0) {
      cavesArr.push({
        name: dest1,
        leadsTo: [ dest2 ],
        multiplePassThrough: dest1 === dest1.toUpperCase()
      });
    } else {
      findDest1[0].leadsTo.push(dest2);
    }
    let findDest2 = cavesArr.filter(e => e.name === dest2);
    if (findDest2.length === 0) {
      cavesArr.push({
        name: dest2,
        leadsTo: [ dest1 ],
        multiplePassThrough: dest2 === dest2.toUpperCase()
      });
    } else {
      findDest2[0].leadsTo.push(dest1);
    }
  }
  return cavesArr;
}

// Recursively collect every start→end path (part 1 rule: small caves at most
// once, big caves freely). Returns pathsArr, an array of arrays of cave
// objects.
// BUG FIXES vs. the posted code: the [0] indexing on the filter results and
// the `dests` lookup (startRoom.leadsTo) were lost in formatting; restored.
// Debug console.log calls removed — they dominated the runtime.
function findPaths(mapArr, startRoom = mapArr.filter(e => e.name === 'start')[0], currentPath = [], pathsArr = []) {
  if (startRoom.name === 'end') {
    // Complete path: copy it, append the end room, and record it.
    let tempPath = currentPath.map(e => e);
    tempPath.push(startRoom);
    pathsArr.push(tempPath);
    return pathsArr;
  }
  if (startRoom.name === 'start') {
    currentPath.push(startRoom);
  }
  let dests = startRoom.leadsTo;
  let destObjects = [];
  for (const dest of dests) {
    destObjects.push(mapArr.filter(e => e.name === dest)[0]);
  }
  // Eligible: not yet on the path, or a multiple-pass-through (big) cave.
  let eligibleDests = destObjects.filter(e => (currentPath.filter(f => e.name === f.name).length === 0) || e.multiplePassThrough === true);
  for (const dest of eligibleDests) {
    let tempPath = currentPath.map(e => e);
    if (dest.name != 'end') {
      tempPath.push(dest);
    }
    findPaths(mapArr, dest, tempPath, pathsArr);
  }
  return pathsArr;
}

(async function mainExecution() {
let cavesArr = parseCaves(inputArr);
console.log(cavesArr);
let paths = findPaths(cavesArr);
console.log(paths);
console.log(paths.length);
})();

Part 2
const fs = require('fs');

let inputArr = [];

input: fileStream,
crlfDelay: Infinity
});

for await (const line of rl) {
try {
inputArr.push(line);
} catch(e) {
console.error(e);
}
}
}

// Parse "a-b" lines into an array of cave objects (same as part 1):
//   { name, leadsTo: [neighbour names], multiplePassThrough: bool }.
// BUG FIXES vs. the posted code: regex capture indices (found[1], found[2])
// and the else-branch bodies were lost in formatting; restored. The per-line
// debug console.log was removed.
function parseCaves(input) {
  let cavesArr = [];
  for (const line of input) {
    let lineRegex = /(\w+)-(\w+)/;
    let found = line.match(lineRegex);
    let dest1 = found[1];
    let dest2 = found[2];
    let findDest1 = cavesArr.filter(e => e.name === dest1);
    if (findDest1.length === 0) {
      cavesArr.push({
        name: dest1,
        leadsTo: [ dest2 ],
        multiplePassThrough: dest1 === dest1.toUpperCase()
      });
    } else {
      findDest1[0].leadsTo.push(dest2);
    }
    let findDest2 = cavesArr.filter(e => e.name === dest2);
    if (findDest2.length === 0) {
      cavesArr.push({
        name: dest2,
        leadsTo: [ dest1 ],
        multiplePassThrough: dest2 === dest2.toUpperCase()
      });
    } else {
      findDest2[0].leadsTo.push(dest1);
    }
  }
  return cavesArr;
}

// Part-2 path collector: one small cave per path may be visited twice
// (never 'start' or 'end'); smallRoomDoubled records whether that budget is
// spent on the current path.
// BUG FIXES vs. the posted code: the [0] indexing on filter results and the
// `dests` lookup (startRoom.leadsTo) were lost in formatting; restored.
// Debug console.log calls removed — they dominated the runtime.
function findPaths(mapArr, startRoom = mapArr.filter(e => e.name === 'start')[0], currentPath = [], pathsArr = [], smallRoomDoubled = false) {
  if (startRoom.name === 'end') {
    let tempPath = currentPath.map(e => e);
    tempPath.push(startRoom);
    pathsArr.push(tempPath);
    return pathsArr;
  }
  if (startRoom.name === 'start') {
    currentPath.push(startRoom);
  }
  let dests = startRoom.leadsTo;
  let destObjects = [];
  for (const dest of dests) {
    destObjects.push(mapArr.filter(e => e.name === dest)[0]);
  }
  // Eligible: unvisited, a big cave, or a small cave seen exactly once while
  // the double-visit budget is still available.
  let eligibleDests = destObjects.filter(e =>
    (
      currentPath.filter(f => e.name === f.name).length === 0
      ||
      e.multiplePassThrough === true
      ||
      (
        e.name === e.name.toLowerCase()
        && currentPath.filter(f => e.name === f.name).length === 1
        && smallRoomDoubled === false
        && e.name != 'start'
        && e.name != 'end'
      )
    ));
  for (const dest of eligibleDests) {
    let tempPath = currentPath.map(e => e);
    let tempSmallRoomDoubled = smallRoomDoubled;
    if (dest.name != 'end') {
      tempPath.push(dest);
    }
    if (
      dest.name === dest.name.toLowerCase()
      && currentPath.filter(f => dest.name === f.name).length === 1
      && tempSmallRoomDoubled === false
      && dest.name != 'start'
      && dest.name != 'end'
    ) {
      // This step consumes the double visit for the whole subtree.
      findPaths(mapArr, dest, tempPath, pathsArr, true);
    } else {
      findPaths(mapArr, dest, tempPath, pathsArr, smallRoomDoubled);
    }
  }
  return pathsArr;
}

(async function mainExecution() {
let cavesArr = parseCaves(inputArr);
console.log(cavesArr);
let paths = findPaths(cavesArr);
console.log(paths);
console.log(paths.length);
})();

1. 
wycy
How's the run time (especially for part 2) in JS?

How's the run time (especially for part 2) in JS?

1. 3d12
Really bad. Like, over a minute? But, I didn't try it with all the console.log statements removed.

Really bad. Like, over a minute? But, I didn't try it with all the console.log statements removed.

10. wycy
(edited )
Rust Reasonably happy with my code cleanliness, but this runs really slowly: 3.03 sec on my machine. Edit: Combined Start/End into my cave enum to clean up the match arms. Edit 2: Got a slight...

Rust

Reasonably happy with my code cleanliness, but this runs really slowly: 3.03 sec on my machine.

Edit: Combined Start/End into my cave enum to clean up the match arms.

Edit 2: Got a slight speed increase by switching to a HashMap of Cave rather than a Vec of Cave

Rust (refactored)
use std::collections::{VecDeque,HashSet,HashMap};
use std::env;
use std::fs::File;
use std::io::{self, BufRead, BufReader};

// Role of a cave, derived purely from its name.
#[derive(PartialEq)]
enum CaveType {
Start,
End,
Large,
Small,
}
impl CaveType {
// Classify a cave name: "start"/"end" are special; otherwise the case of
// the first letter decides Small (lowercase) vs Large (uppercase).
pub fn type_from(name: &str) -> Self {
match name {
"start" => CaveType::Start,
"end"   => CaveType::End,
other => match other.chars().next() {
Some('a'..='z') => CaveType::Small,
Some('A'..='Z') => CaveType::Large,
other => panic!("Unknown character: {:?}", other),
}
}
}
}

// A cave: just the names of its directly connected neighbours.
struct Cave {
connections: Vec<String>,
}
impl Cave {
// A new cave with no connections recorded yet.
pub fn new() -> Self {
Self { connections: Vec::new() }
}
}

// Checks whether any small cave appears more than once along `path`
// (paths are encoded as "a-b-c" strings).
// Takes &str instead of &String — existing callers passing `&String`
// deref-coerce automatically — and borrows the cave names instead of
// allocating a String per visit.
fn has_revisited_small_cave(path: &str) -> bool {
    let mut small_cave_visits: HashMap<&str, usize> = HashMap::new();
    let small_caves = path
        .split('-')
        .filter(|c| CaveType::type_from(c) == CaveType::Small);
    for small_cave in small_caves {
        *small_cave_visits.entry(small_cave).or_insert(0) += 1;
    }
    small_cave_visits.values().any(|&v| v > 1)
}

/// Breadth-first enumeration of complete paths, each encoded as an
/// "a-b-c" string. With `allow_revisit`, a single small cave per path may be
/// visited twice; "start" is never re-entered.
fn count_paths(caves: &HashMap<String, Cave>, allow_revisit: bool) -> usize {
    let mut paths: HashSet<String> = HashSet::new();
    let mut queue: VecDeque<String> = VecDeque::new();
    queue.push_back("start".to_string());

    while let Some(current_path) = queue.pop_front() {
        // The cave we currently occupy is the last path component.
        let current_name = current_path.split('-').last().unwrap();
        let current_cave = caves.get(current_name).unwrap();

        // Investigate connections
        for connection in &current_cave.connections {
            let new_path = format!("{}-{}", current_path, connection);
            match CaveType::type_from(connection) {
                CaveType::Start => {} // can't revisit start
                CaveType::End => {
                    paths.insert(new_path);
                }
                CaveType::Large => queue.push_back(new_path),
                CaveType::Small => {
                    // How often has this small cave appeared on this path?
                    let count = current_path.split('-').filter(|c| c == connection).count();
                    match count {
                        // first visit to this small cave
                        0 => queue.push_back(new_path),
                        // first small-cave revisit on this path
                        1 if allow_revisit && !has_revisited_small_cave(&current_path) => {
                            queue.push_back(new_path)
                        }
                        _ => {}
                    }
                }
            }
        }
    }

    paths.len()
}

/// Read the puzzle input from the file named by `input`, build the
/// bidirectional cave-connection map, and print both answers.
///
/// Fixes the posted version, which referenced an undefined `reader` and an
/// un-imported `io` module and therefore did not compile; I/O errors are now
/// propagated with `?` instead of panicking.
fn solve(input: &str) -> std::io::Result<()> {

    // Input: one "A-B" edge per line.
    let input: Vec<String> = std::fs::read_to_string(input)?
        .lines()
        .map(str::to_string)
        .collect();

    // Build cave map: every edge is stored in both directions.
    let mut caves: HashMap<String, Cave> = HashMap::new();
    for line in &input {
        let mut both_caves = line.split('-');
        let cave1 = both_caves.next().unwrap().to_string();
        let cave2 = both_caves.next().unwrap().to_string();
        // `or_insert_with` defers construction until actually needed.
        caves.entry(cave1.clone()).or_insert_with(Cave::new).connections.push(cave2.clone());
        caves.entry(cave2).or_insert_with(Cave::new).connections.push(cave1);
    }

    // Solve
    println!("Part 1: {}", count_paths(&caves, false)); // 4411
    println!("Part 2: {}", count_paths(&caves, true)); // 136767

    Ok(())
}

fn main() {
    // Usage: <program> <input-file>
    let args: Vec<String> = env::args().collect();
    // The posted `let filename = &args;` passed the whole Vec where a &str
    // was expected (a type error); the input path is the first argument.
    let filename = args.get(1).expect("usage: day12 <input-file>");
    solve(filename).unwrap();
}

Rust (old solution)
use std::env;
use std::fs::File;
use std::collections::{VecDeque,HashSet,HashMap};

/// The four cave categories the puzzle distinguishes.
enum CaveType {
    Start,
    End,
    Large,
    Small,
}

impl CaveType {
    /// Classify `name`: the literal "start"/"end" first, then the case of
    /// the first character (lowercase => Small, uppercase => Large).
    pub fn type_from(name: &str) -> Self {
        match name {
            "start" => CaveType::Start,
            "end" => CaveType::End,
            _ => match name.chars().next() {
                Some(c) if ('a'..='z').contains(&c) => CaveType::Small,
                Some(c) if ('A'..='Z').contains(&c) => CaveType::Large,
                other => panic!("Unknown character: {:?}", other),
            },
        }
    }
}

/// Graph node: a cave knows its own name and its neighbors' names.
struct Cave {
    name: String,
    connections: Vec<String>,
}

impl From<&str> for Cave {
    /// Build a cave from its name, starting with an empty adjacency list.
    fn from(input: &str) -> Self {
        let name = input.to_string();
        Cave { name, connections: Vec::new() }
    }
}

// Checks whether any small cave appears more than once in the given
// dash-separated path string (part 2's single-revisit rule).
//
// Takes `&str` rather than `&String` (callers' `&String` still coerces) and
// borrows path segments as keys, avoiding a String allocation per segment.
fn has_revisited_small_cave(path: &str) -> bool {
    let mut small_cave_visits: HashMap<&str, usize> = HashMap::new();
    for cave in path.split('-') {
        // `if let` replaces the match-to-bool closure of the original.
        if let CaveType::Small = CaveType::type_from(cave) {
            *small_cave_visits.entry(cave).or_insert(0) += 1;
        }
    }
    // Short-circuits on the first doubly-visited small cave.
    small_cave_visits.values().any(|&v| v > 1)
}

/// BFS over partial paths (dash-joined cave-name strings), counting all
/// distinct start→end routes. `allow_revisit` enables part 2's rule that a
/// single small cave may be entered twice per path.
fn count_paths(caves: &Vec<Cave>, allow_revisit: bool) -> usize {
    let mut paths: HashSet<String> = HashSet::new();
    let mut q: VecDeque<String> = VecDeque::new();
    q.push_back("start".to_string());

    // `while let` replaces `while q.len() > 0` + unwrap.
    while let Some(current_path) = q.pop_front() {
        // The current cave is the last segment of the partial path.
        let current_name = current_path.split('-').last().unwrap();
        // `find` stops at the first match instead of filter().next().
        let current_cave = caves.iter().find(|c| c.name == current_name).unwrap();

        // Investigate connections
        for connection in &current_cave.connections {
            let new_path = format!("{}-{}", current_path, connection);
            match CaveType::type_from(connection) {
                CaveType::Start => {} // "start" may never be re-entered
                CaveType::End => {
                    paths.insert(new_path); // complete path found
                }
                CaveType::Large => q.push_back(new_path), // always revisitable
                CaveType::Small => {
                    // Prior visits to this small cave along the path.
                    let count = current_path.split('-').filter(|c| c == connection).count();
                    match count {
                        0 => q.push_back(new_path), // first visit
                        // One revisit allowed while no small cave has been
                        // revisited yet on this path.
                        1 if allow_revisit && !has_revisited_small_cave(&current_path) => {
                            q.push_back(new_path)
                        }
                        _ => {} // small-cave budget exhausted
                    }
                }
            }
        }
    }
    paths.len()
}

/// Read the puzzle input from the file named by `input`, build the cave
/// list with bidirectional connections, and print both answers.
///
/// Fixes the posted version, which referenced an undefined `reader` and an
/// un-imported `io` module and therefore did not compile; I/O errors are now
/// propagated with `?` instead of panicking.
fn solve(input: &str) -> std::io::Result<()> {

    // Input: one "A-B" edge per line.
    let input: Vec<String> = std::fs::read_to_string(input)?
        .lines()
        .map(str::to_string)
        .collect();

    // Make caves: one Cave per unique name.
    let mut caves: Vec<Cave> = Vec::new();
    for line in &input {
        for cave in line.split('-') {
            // `any` short-circuits, unlike filter().count() == 0.
            if !caves.iter().any(|c| c.name == cave) {
                caves.push(Cave::from(cave));
            }
        }
    }

    // Connect caves (every edge is stored in both directions).
    for line in &input {
        let mut both_caves = line.split('-');
        let cave1 = both_caves.next().unwrap();
        let cave2 = both_caves.next().unwrap();
        caves.iter_mut().filter(|c| c.name == cave1).for_each(|c| c.connections.push(cave2.to_string()));
        caves.iter_mut().filter(|c| c.name == cave2).for_each(|c| c.connections.push(cave1.to_string()));
    }

    // Solve parts 1 & 2
    println!("Part 1: {}", count_paths(&caves, false)); // 4411
    println!("Part 2: {}", count_paths(&caves, true)); // 136767

    Ok(())
}

fn main() {
    // Usage: <program> <input-file>
    let args: Vec<String> = env::args().collect();
    // The posted `let filename = &args;` passed the whole Vec where a &str
    // was expected (a type error); the input path is the first argument.
    let filename = args.get(1).expect("usage: day12 <input-file>");
    solve(filename).unwrap();
}

1 vote
11. jzimbel
Elixir I really struggled with debugging my part 2 solution, and in the end it came down to me not updating my visited counts at the right time. I wasn't adding the current cave to the visited...

Elixir

I really struggled with debugging my part 2 solution, and in the end it came down to me not updating my visited counts at the right time. I wasn't adding the current cave to the visited counts before consulting them to determine which of the next caves were visitable, which meant if the current cave is a small one being visited the second time, and is the first instance of a small cave being visited a second time, a cave right after it might get its second visit as well. Subtle and maddening. 😵‍💫

Besides that... I'm happy with my approach, which reuses the same recursive function to count paths, with parts 1 and 2 just passing it a different filter function to determine which of a cave's connections are visitable based on the current visited counts.

There's some ugly and repetitive binary pattern matching (e.g. <<first_char, _rest::binary>> paired with the guard when first_char in ?a..?z) to determine whether a cave is small, but when I tried replacing it with a small_cave?/1 function call, the run time tripled, from roughly 0.5 seconds to 1.5 seconds! I think I could do more up-front work to tag the different types of caves when parsing the input, but I'm calling it quits for now.

Both parts
# Part 1: count start→end paths where each small cave is entered at most once.
def part1(input) do
  count_paths(parse_cave_map(input), &visitable_p1?/2)
end

# Part 2: same traversal, but one single small cave may be entered twice.
def part2(input) do
  count_paths(parse_cave_map(input), &visitable_p2?/2)
end

# Shared path-counting core. The header clause declares the defaults:
# traversal starts at "start" with empty visit counts. `visitable_fn`
# encodes the part-specific revisit rule (see visitable_p1?/visitable_p2?).
defp count_paths(cave \\ "start", visit_counts \\ %{}, cave_map, visitable_fn)

# Arriving at "end" completes exactly one path.
defp count_paths("end", _, _, _), do: 1

defp count_paths(cave, visit_counts, cave_map, visitable_fn) do
# Record the current visit BEFORE filtering neighbors — consulting stale
# counts here is exactly the bug described in the author's write-up above.
visit_counts = Map.update(visit_counts, cave, 1, &(&1 + 1))

# Sum the path counts over every neighbor that is still visitable.
cave_map[cave]
|> Enum.filter(&visitable_fn.(&1, visit_counts))
|> Enum.map(&count_paths(&1, visit_counts, cave_map, visitable_fn))
|> Enum.sum()
end

# Part 1 visitable checker.
# Caves whose name starts with a lowercase byte (small caves — which
# includes the literal "start" and "end") may only be entered while still
# unvisited.
defp visitable_p1?(<<first_char, _rest::binary>> = small_cave, visit_counts)
when first_char in ?a..?z do
Map.get(visit_counts, small_cave, 0) == 0
end

# Anything else (uppercase names, i.e. large caves) is always visitable.
defp visitable_p1?(_cave, _visit_counts), do: true

# Part 2 visitable checker.
# "start" and "end" may be entered at most once; this clause must come
# first because both names also match the lowercase pattern below.
defp visitable_p2?(terminus, visit_counts) when terminus in ~w[start end] do
Map.get(visit_counts, terminus, 0) == 0
end

# A small cave may take a second visit only while no small cave has been
# visited twice yet on this path.
defp visitable_p2?(<<first_char, _rest::binary>> = small_cave, visit_counts)
when first_char in ?a..?z do
case Map.get(visit_counts, small_cave, 0) do
0 -> true
1 -> no_small_caves_visited_twice?(visit_counts)
_ -> false
end
end

# Large caves are always visitable.
defp visitable_p2?(_cave, _visit_counts), do: true

# True while the part-2 "double visit" budget is still unspent, i.e. no
# small cave has been entered twice yet.
defp no_small_caves_visited_twice?(visit_counts) do
  not Enum.any?(visit_counts, fn {cave, count} ->
    small_cave?(cave) and count > 1
  end)
end

defp small_cave?(<<first_char, _rest::binary>>), do: first_char in ?a..?z

# Parse "A-B" edge lines into an adjacency map: cave name => list of
# neighbor names, with every edge recorded in both directions.
defp parse_cave_map(input) do
  input
  |> String.split("\n", trim: true)
  |> Enum.reduce(%{}, fn line, cave_map ->
    [cave1, cave2] = Regex.run(~r/(\w+)-(\w+)/, line, capture: :all_but_first)

    cave_map
    |> Map.update(cave1, [cave2], &[cave2 | &1])
    |> Map.update(cave2, [cave1], &[cave1 | &1])
  end)
end
end

1 vote
12. Gyrfalcon
I have fallen behind because life, but I will keep going! This one was not too bad, actually, once I figured out in part 2 that I was calling my function from part 1 on recursion, which obviously...

I have fallen behind because life, but I will keep going! This one was not too bad, actually, once I figured out in part 2 that I was calling my function from part 1 on recursion, which obviously gave the wrong answer. With that figured out, I ran my solution for about 30 seconds before I gave up and recompiled with release instead of debug. It now runs in about 5.5 seconds, which isn't amazing compared to some of the solutions here, but I'm happy to wait that long.

Part 1
import std/[strformat, sequtils, strutils, sugar, tables, os, sets]

# Well now I'm glad I learned to use refs!
type
  CaveRef = ref Cave          # heap reference so neighbors can form cycles
  Cave = object of RootObj
    name: string              # cave label from the input
    isLower: bool             # true for small (all-lowercase-named) caves
    neighbors: seq[CaveRef]   # adjacent caves

func initCave(name: string): Cave =
  ## Build a Cave value; it is "lower" (small) when its name is unchanged
  ## by lowercasing.
  result.name = name
  result.isLower = name.toLower() == name
  result.neighbors = @[]

# Allocate a Cave on the heap and initialize it via initCave.
func newCave(name: string): CaveRef =
new(result)
result[] = initCave(name)

# Materialize the linked cave graph from the two direction tables and
# return the "start" node.
# NOTE(review): the bodies of the two inner neighbor loops below are
# missing — as transcribed nothing ever fills any cave's `neighbors`, and
# `left[name]` / `right[name]` would raise for names present in only one
# table. The statements were presumably lost when the code was pasted
# (likely something like `nodeTable[name].neighbors.add(nodeTable[neighborName])`
# guarded by a key check) — TODO: confirm against the author's original.
proc createGraph(left, right: Table[string, seq[string]]): CaveRef =
let nodeNames = toHashSet(toSeq(left.keys()) & toSeq(right.keys()))
var nodeTable: Table[string, CaveRef]

for name in nodeNames:
nodeTable[name] = newCave(name)

for name in nodeNames:
for neighborName in left[name]:
for neighborName in right[name]:

result = nodeTable["start"]

# Read "A-B" edge lines from the input file, split each edge into a
# left-to-right and a right-to-left table, and hand both to createGraph.
# NOTE(review): this proc is garbled in transcription — each `else:` below
# has no matching `if`, and `sides` is used without an index
# (`leftToRight[sides] = @[sides]` cannot compile since `sides` is a seq).
# Presumably the original read `leftToRight[sides[0]] = @[sides[1]]` (and
# the mirror for rightToLeft) behind an if/else that appends when the key
# already exists; the bracketed subscripts were likely eaten by the forum's
# markup. TODO: restore from the author's original.
proc parseInput(inputFile: string): CaveRef =
var input = collect(newSeq):
for line in inputFile.lines: line

var leftToRight: Table[string, seq[string]]
var rightToLeft: Table[string, seq[string]]
for line in input:
var sides = line.split("-")
else:
leftToRight[sides] = @[sides]

else:
rightToLeft[sides] = @[sides]

result = createGraph(leftToRight, rightToLeft)

proc delve(root: CaveRef, path: seq[string]): seq[seq[string]] =
  ## Recursively enumerate every path from `root` to "end", never entering
  ## a lowercase (small) cave that already appears in `path`.
  if root.name == "end":
    return @[path & @[root.name]]
  if root.isLower and root.name in path:
    return @[]
  result = concat(mapIt(root.neighbors, delve(it, path & @[root.name])))

proc main(inputFile: string) =
  ## Parse the cave graph and print the number of part-1 paths.
  let rootCave = parseInput(inputFile)
  let paths = delve(rootCave, @[])
  echo paths.len

# Entry point: the first CLI argument names the input file.
when is_main_module:
main(paramStr(1))

Part 2 diff
@@ -63,10 +63,29 @@ proc delve(root: CaveRef, path: seq[string]): seq[seq[string]] =
else:
result = concat(mapIt(root.neighbors, delve(it, path & @[root.name])))

+proc delveDeeper(root: CaveRef, path: seq[string]): seq[seq[string]] =
+  if root.name == "end":
+    result = @[path & @[root.name]]
+  elif root.name == "start" and path.len > 0:
+    result = @[]
+  elif root.isLower:
+    var lowerCounts = filterIt(path, it == it.toLower()).toCountTable()
+    if lowerCounts.getOrDefault(root.name, 0) > 1:
+      result = @[]
+    elif lowerCounts.getOrDefault(root.name, 0) > 0 and
+         anyIt(lowerCounts.values().toSeq(), it > 1):
+      result = @[]
+    else:
+      result = concat(mapIt(root.neighbors, delveDeeper(it, path & @[root.name])))
+  else:
+    result = concat(mapIt(root.neighbors, delveDeeper(it, path & @[root.name])))
+
proc main(inputFile: string) =
var rootCave = parseInput(inputFile)
var paths = delve(rootCave, @[])
echo paths.len
+  var deepPaths = delveDeeper(rootCave, @[])
+  echo deepPaths.len

1 vote