diff --git a/oving_2/main.py b/oving_2/main.py index 44ec493..fd996a8 100644 --- a/oving_2/main.py +++ b/oving_2/main.py @@ -23,7 +23,7 @@ leftMotor = Motor(port=Port.A) rightMotor = Motor(port=Port.C) -robotDriveBase = DriveBase(leftMotor, rightMotor, wheel_diameter=56, axle_track=110) +robotDriveBase = DriveBase(leftMotor, rightMotor, wheel_diameter=56, axle_track=180) startBtn = TouchSensor(port=Port.S1) usSensor = UltrasonicSensor(port=Port.S3) diff --git a/oving_3/main.py b/oving_3/main.py index 5e8bb24..6f6e751 100644 --- a/oving_3/main.py +++ b/oving_3/main.py @@ -38,13 +38,9 @@ #ev3.screen.print("Spiller", player.name, "...") #player.play() -def doRandomTrick(): - trickIndex = randint(0, 1) - - if trickIndex == 1: - spin() - elif trickindex == 2: - playMusic() +def doRandomTrick(actions): + trick = actions[randint(0, len(actions) - 1)] + trick() def spin(): directions = [-1, 1] @@ -59,6 +55,8 @@ def playMusic(): ev3.screen.print("Spiller", player.name, "...") player.play() +actions = [spin, playMusic] + leftMotor = Motor(port=Port.A) rightMotor = Motor(port=Port.C) robotDriveBase = DriveBase(leftMotor, rightMotor, wheel_diameter=56, axle_track=160) @@ -71,39 +69,53 @@ def playMusic(): speaker = ev3.speaker -leftSensorThreshold = 8 -rigthSensorThreshold = 7 -speed = 50 +leftSensorThreshold = 9 +rigthSensorThreshold = 10 +speed = 115 currentTime = time.time() -lastCorrectino = currentTime +lastCorrection = currentTime lastDirection = 1 lastTrick = currentTime +rotationSpeed = 40 while True: currentTime = time.time() - """ if currentTime - lastCorrectino > 2: + ev3.screen.print(leftColorSensor.reflection(), rightColorSensor.reflection()) + + if usSensor.distance() <= 50: + break + """ + if currentTime - lastCorrection > 2: while leftColorSensor.reflection() > leftSensorThreshold or rightColorSensor.reflection() < rigthSensorThreshold: - robotDriveBase.drive(-speed, 50 * lastDirection) - - lastCorrectino = time.time() - continue """ + i = (time.time() - 
lastCorrection) * 3 + robotDriveBase.drive(-speed, i * lastDirection) + + lastCorrection = time.time() + continue + """ if currentTime - lastTrick > 10: - doTrick() + doRandomTrick(actions) lastTrick = time.time() if leftColorSensor.reflection() < leftSensorThreshold: - robotDriveBase.drive(-speed, 25) - lastCorrectino == time.time() - lastDirection = -1 + robotDriveBase.drive(-speed, rotationSpeed) + lastCorrection = time.time() + lastDirection = 1 continue if rightColorSensor.reflection() < rigthSensorThreshold: - robotDriveBase.drive(-speed, -25) - lastCorrectino = currentTime - lastDirection = 1 + robotDriveBase.drive(-speed, -rotationSpeed) + lastCorrection = currentTime + lastDirection = -1 continue - - robotDriveBase.drive(-speed, 50) \ No newline at end of file + + + + robotDriveBase.drive(-speed, 0) + + +robotDriveBase.stop() +ev3.speaker.play_file(SoundFile.FANFARE) \ No newline at end of file diff --git a/oving_3/rtttl_player.py b/oving_3/rtttl_player.py index 5551073..8ca0e4f 100644 --- a/oving_3/rtttl_player.py +++ b/oving_3/rtttl_player.py @@ -8,12 +8,13 @@ class RTTTLPlayer: 'g#': 25.96, 'a': 27.50, 'a#': 29.14, 'b': 30.87 } - def __init__(self, ev3): + def __init__(self, ev3, rtttl: str): """ :param ev3: EV3Brick instance from pybricks.hubs :param rtttl: RTTTL string (name:d=4,o=5,b=100:notes) """ self.ev3 = ev3 + self.name, self.defaults, self.notes = self.parse_rtttl(rtttl) def parse_rtttl(self, rtttl: str): name, settings, notes = rtttl.split(':') @@ -31,15 +32,15 @@ def note_to_freq(self, note: str, octave: int): base = self.NOTE_FREQS[note] return round(base * (2 ** octave), 2) - def play(self, name, defaults, notes, tempo_scale: float = 1.0): + def play(self, tempo_scale: float = 1.0): """ Play the parsed RTTTL melody. 
:param tempo_scale: Scale factor for speed (1.0 = normal, 2.0 = twice as fast, 0.5 = half speed) """ - bpm = defaults.get('b', 63) + bpm = self.defaults.get('b', 63) whole_note = (60 / bpm) * 4 * 1000 # ms - for raw in notes: + for raw in self.notes: match = re.match(r'(\d+)?([a-gp][#]?)(\d)?(\.*)?', raw.strip(), re.I) if not match: continue diff --git a/oving_4/rttl_player.py b/oving_4/rttl_player.py new file mode 100644 index 0000000..8ca0e4f --- /dev/null +++ b/oving_4/rttl_player.py @@ -0,0 +1,60 @@ +import re + + +class RTTTLPlayer: + NOTE_FREQS = { + 'c': 16.35, 'c#': 17.32, 'd': 18.35, 'd#': 19.45, + 'e': 20.60, 'f': 21.83, 'f#': 23.12, 'g': 24.50, + 'g#': 25.96, 'a': 27.50, 'a#': 29.14, 'b': 30.87 + } + + def __init__(self, ev3, rtttl: str): + """ + :param ev3: EV3Brick instance from pybricks.hubs + :param rtttl: RTTTL string (name:d=4,o=5,b=100:notes) + """ + self.ev3 = ev3 + self.name, self.defaults, self.notes = self.parse_rtttl(rtttl) + + def parse_rtttl(self, rtttl: str): + name, settings, notes = rtttl.split(':') + defaults = {} + for s in settings.split(','): + k, v = s.split('=') + defaults[k] = int(v) + notes = notes.split(',') + return name, defaults, notes + + def note_to_freq(self, note: str, octave: int): + """ + Convert note name + octave to frequency in Hz. + """ + base = self.NOTE_FREQS[note] + return round(base * (2 ** octave), 2) + + def play(self, tempo_scale: float = 1.0): + """ + Play the parsed RTTTL melody. 
+ :param tempo_scale: Scale factor for speed (1.0 = normal, 2.0 = twice as fast, 0.5 = half speed) + """ + bpm = self.defaults.get('b', 63) + whole_note = (60 / bpm) * 4 * 1000 # ms + + for raw in self.notes: + match = re.match(r'(\d+)?([a-gp][#]?)(\d)?(\.*)?', raw.strip(), re.I) + if not match: + continue + + dur, note, octave, dot = match.groups() + dur = int(dur) if dur else self.defaults.get('d', 4) + octave = int(octave) if octave else self.defaults.get('o', 6) + + duration = (whole_note / dur) * tempo_scale + if dot: + duration *= 1.5 + + if note.lower() == 'p': # pause + self.ev3.speaker.beep(0, int(duration)) + else: + freq = self.note_to_freq(note.lower(), octave) + self.ev3.speaker.beep(int(freq), int(duration)) \ No newline at end of file