Commit 0e325820 authored by Simon Wijk Stranius

merge remote master

parent b83cad2b
Pipeline #57370 passed
@@ -24,6 +24,7 @@ class EmotionalPepper(State):
        self.log = self.node.get_logger()
        self.machine = sm.Machine(model=self, states = EmotionalPepper.states, initial="dummy", send_event=True)
        self.to_look_for_persons()
        self.known_persons = {}
        self.person_in_frame = False
        self.machine.add_transition("trigger_found_person", "look_for_persons", "recognize_person")
        self.machine.add_transition("trigger_greet_person", "recognize_person", "greet_person")
@@ -106,6 +107,14 @@ class EmotionalPepper(State):
    def __emotion_cb(self, emotions):
        self.log.info("Emotion CB!")
        self.log.info(str(emotions))
        if (emotions.id in self.known_persons):
            self.say(f"Hello {emotions.id}")
            #if self.known_persons[emotions.id] == "unknown":
            #    self.say("What's your name?")
        #else:
            #self.say("I don't know you yet. Look into the camera and I'll remember you")
        self.known_persons[emotions.id] = "unknown"
        self.log.info(f"Known persons: {self.known_persons}")
        self.trigger_analyze_emotion(emotion=emotions.dominant_emotion)
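For context: the sm.Machine call above matches the API of the transitions package (a Machine bound to a model, auto-generated to_<state>() methods, add_transition(trigger, source, dest)). Assuming that is the library in use, send_event=True means keyword arguments passed to a trigger such as trigger_analyze_emotion(emotion=...) arrive wrapped in an EventData object inside state callbacks. A minimal, self-contained sketch of that pattern; the "analyze_emotion" state and its transition are illustrative, only the names visible in the diff are taken from the repository:

from transitions import Machine

class PepperSketch:
    states = ["dummy", "look_for_persons", "recognize_person", "analyze_emotion"]

    def __init__(self):
        # send_event=True wraps trigger kwargs in an EventData object
        self.machine = Machine(model=self, states=PepperSketch.states,
                               initial="dummy", send_event=True)
        self.to_look_for_persons()  # auto-generated to_<state>() method
        self.machine.add_transition("trigger_found_person", "look_for_persons", "recognize_person")
        self.machine.add_transition("trigger_analyze_emotion", "recognize_person", "analyze_emotion")

    def on_enter_analyze_emotion(self, event):
        # kwargs passed to the trigger are available on event.kwargs
        print("dominant emotion:", event.kwargs.get("emotion"))

p = PepperSketch()
p.trigger_found_person()
p.trigger_analyze_emotion(emotion="happy")  # prints: dominant emotion: happy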
@@ -3,9 +3,13 @@ ARG NO_GPU=0
# Install pytorch
RUN if [ "$NO_GPU" = 0 ]; then \
pip3 install torch torchvision; \
pip3 install torch torchvision deepspeech-gpu; \
else \
pip3 install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html; fi
pip3 install deepspeech torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html; fi
# Get deepspeech pre-trained English model
RUN curl -L -o src/lhw_nlp/lhw_nlp/deepspeech-0.9.3-models.pbmm https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm
RUN curl -L -o src/lhw_nlp/lhw_nlp/deepspeech-0.9.3-models.scorer https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer
# Install hugging face transformers (for gpt-2)
RUN pip3 install transformers
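The two curl commands above fetch the acoustic model (.pbmm) and the external scorer (.scorer) that the DeepSpeech 0.9.3 Python package expects. A rough sketch of how such files are typically loaded and used for transcription; the paths mirror the Dockerfile above, and the 16 kHz mono int16 audio assumption follows the standard DeepSpeech API rather than code from this repository:

import numpy as np
from deepspeech import Model

# Paths match the files downloaded in the Dockerfile above
MODEL = "src/lhw_nlp/lhw_nlp/deepspeech-0.9.3-models.pbmm"
SCORER = "src/lhw_nlp/lhw_nlp/deepspeech-0.9.3-models.scorer"

ds = Model(MODEL)
ds.enableExternalScorer(SCORER)  # optional language-model rescoring

# DeepSpeech expects 16 kHz, mono, 16-bit PCM samples
audio = np.zeros(16000, dtype=np.int16)  # one second of silence as a placeholder
print(ds.stt(audio))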
@@ -53,7 +53,6 @@ namespace lhw_qi
asr_sub_ = almemory.call<qi::AnyObject>("subscriber", "WordRecognized");
// MORE INFO AT http://doc.aldebaran.com/2-4/dev/libqi/api/cpp/type/anyobject.html
asr_sub_.connect("signal", boost::function<void(qi::AnyValue)>(boost::bind(&SpeechRecognition::word_cb, this, boost::placeholders::_1)));
}
} // namespace lhw_qi
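The C++ snippet above subscribes to the ALMemory event WordRecognized through libqi and connects word_cb to the subscriber's signal. For readers more familiar with the Python bindings, the same subscription pattern looks roughly like this; the session address and callback are placeholders, not taken from the repository:

import qi

def word_cb(value):
    # WordRecognized delivers alternating [word, confidence, word, confidence, ...]
    print("recognized:", value)

app = qi.Application(url="tcp://127.0.0.1:9559")  # placeholder robot address
app.start()
almemory = app.session.service("ALMemory")
subscriber = almemory.subscriber("WordRecognized")
subscriber.signal.connect(word_cb)
app.run()  # block and process callbacks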
r""" Implementation of a Kalman filter for predicting the positions of objects in the image.
""" Implementation of a Kalman filter for predicting the positions of objects in the image.
We represent the "state" of a target by its position and velocity, but we only ever observe the position.
The position is given by the top-left and bottom-right corners of the target bounding box, hence 4 coordinates.
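Under the state layout described in this docstring (4 observed bounding-box coordinates plus their velocities, i.e. an 8-dimensional state with a 4-dimensional measurement), a constant-velocity Kalman filter could be set up roughly as follows. The matrices and noise values below are a generic illustration of the technique, not the filter parameters used in this repository:

import numpy as np

dt = 1.0   # time step between frames (assumed)
dim = 4    # observed coordinates: x1, y1, x2, y2 of the bounding box

# State x = [positions (4), velocities (4)]^T
# Constant-velocity transition: position += velocity * dt
F = np.eye(2 * dim)
F[:dim, dim:] = dt * np.eye(dim)

# Measurement matrix: we only ever observe the positions
H = np.hstack([np.eye(dim), np.zeros((dim, dim))])

x = np.zeros(2 * dim)        # state estimate
P = np.eye(2 * dim)          # state covariance
Q = np.eye(2 * dim) * 1e-2   # process noise (assumed)
R = np.eye(dim) * 1e-1       # measurement noise (assumed)

# Predict
x = F @ x
P = F @ P @ F.T + Q

# Update with a measured box z = [x1, y1, x2, y2]
z = np.array([10.0, 20.0, 50.0, 80.0])
y = z - H @ x                # innovation
S = H @ P @ H.T + R          # innovation covariance
K = P @ H.T @ np.linalg.inv(S)
x = x + K @ y
P = (np.eye(2 * dim) - K @ H) @ P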