A presentation at Geektime Code in Tel Aviv-Yafo, Israel by Aaron Bassett
@aaronbassett
// Naive polling (Prototype.js Ajax.Request): fire a request, then
// unconditionally schedule the next poll one second later — even if the
// previous request is still in flight or failed. This is the "before"
// pattern; the long-polling version on the next slides fixes it.
(function poll() {
    new Ajax.Request('/api/', {
        method: 'get',
        onSuccess: function() { ... },  // body elided on the slide
        onFailure: function() { ... }   // body elided on the slide
    });
    // NOTE(review): setTimeout fires regardless of request outcome, so
    // requests can pile up whenever the server takes longer than 1s.
    setTimeout(poll, 1000);
}());
/**
 * Long polling (Prototype.js Ajax.Request): issue a request with a 60s
 * timeout and only schedule the next request once the current one has
 * completed — success or failure — so requests never overlap.
 *
 * Fix vs. the collapsed slide text: with everything on one line, the
 * `// Do something poll();` comments swallowed the recursive poll()
 * calls, so polling would stop after a single request. Reformatting to
 * multiple lines restores the intended re-poll on both callbacks.
 */
(function poll() {
    new Ajax.Request('/api/', {
        method: 'get',
        timeout: 60000,
        onSuccess: function() {
            // Do something with the response, then re-poll.
            poll();
        },
        onFailure: function() {
            // Do something else (report / back off), then re-poll.
            poll();
        }
    });
}());
“typical header sizes of 700-800 bytes is common” –Google, “SPDY: An experimental protocol for a faster web”
2 bytes. 0x00 UTF8 DATA 0xFF
100,000
700 Bytes
560,000,000
70MB/s
0.2MB/s
https://git.io/fNlS2
http://werise-tornadoserver.ngrok.io/dashboard
write_message on_message
h s i
👍
// Request microphone access and wire the resulting stream into a Web
// Audio graph.
// NOTE(review): navigator.getUserMedia is deprecated — modern code should
// use navigator.mediaDevices.getUserMedia({audio:true}).then(...). Kept
// callback-style here to match the rest of the deck; fix: the function
// expressions now end with explicit semicolons instead of relying on ASI.
var soundAllowed = function (stream) {
    // Keep a global reference so the live stream is not garbage-collected.
    window.persistAudioStream = stream;
    var audioContent = new AudioContext();
    var audioStream = audioContent.createMediaStreamSource(stream);
};

var soundNotAllowed = function (error) {
    // `h` is presumably a heading element defined elsewhere on the page
    // — TODO confirm.
    h.innerHTML = "You must allow your microphone.";
    console.log(error);
};

navigator.getUserMedia({ audio: true }, soundAllowed, soundNotAllowed);
👍
def proxy(self):
    """Return the Nexmo Call Control Object (NCCO) that bridges the
    inbound call to our websocket endpoint for real-time audio streaming.

    Reads NEXMO_NUMBER and WEBSOCKET_SERVER_URL from the environment and
    ``self.base_url`` for the event callback URL.
    """
    # Raw 16-bit linear PCM at 16kHz, streamed to our /socket handler.
    websocket_endpoint = {
        'type': 'websocket',
        'uri': f'{os.environ["WEBSOCKET_SERVER_URL"]}/socket',
        'content-type': 'audio/l16;rate=16000',
        'headers': {},
    }
    connect_action = {
        'action': 'connect',
        'eventUrl': [f'{self.base_url}/events'],
        'from': os.environ['NEXMO_NUMBER'],
        'endpoint': [websocket_endpoint],
    }
    # An NCCO is a list of actions; this one contains a single connect.
    return [connect_action]
def on_message(self, message):
    """Forward an incoming call-audio websocket frame to the transcriber.

    Binary frames carry raw audio and are relayed as-is. Text frames
    carry call metadata as JSON, which is turned into the transcriber's
    "start" command (continuous mode with interim results).

    Fix: ``type(message) != str`` replaced with ``isinstance`` — the
    idiomatic check, and correct for str subclasses.

    NOTE(review): ``yield self.transcriber`` implies this runs as a
    Tornado coroutine — confirm the handler decorates it accordingly.
    """
    transcriber = yield self.transcriber
    if not isinstance(message, str):
        # Binary audio frame: pass straight through.
        transcriber.write_message(message, binary=True)
    else:
        logger.info(message)
        data = json.loads(message)
        data['action'] = "start"
        data['continuous'] = True
        data['interim_results'] = True
        transcriber.write_message(json.dumps(data), binary=False)
self.transcriber = tornado.websocket.websocket_connect( 'wss://[url]?watson-token={token}&model={model}'.format( token=self.transcriber_token, model=os.environ['REALTIME_TRANSCRIBER_MODEL'] ), on_message_callback=self.on_transcriber_message )
self.transcriber = tornado.websocket.websocket_connect( 'wss://[url]?watson-token={token}&model={model}'.format( token=self.transcriber_token, model=os.environ['REALTIME_TRANSCRIBER_MODEL'] ), on_message_callback=self.on_transcriber_message )
def on_transcriber_message(self, message):
    """Handle a transcript payload from the speech-to-text websocket, run
    tone analysis on the text, and broadcast the detected tones to every
    connected dashboard client.
    """
    if message:  # ignore empty frames (e.g. on connection close)
        message = json.loads(message)
        if 'results' in message:
            # NOTE(review): Watson STT normally nests the text as
            # results[0].alternatives[0].transcript — confirm that a
            # flattened 'transcript' key really exists on this payload,
            # otherwise this line raises KeyError.
            transcript = message['transcript']
            tone_results = self.tone_analyzer.tone(
                tone_input=transcript,
                content_type="text/plain"
            )
            tones = tone_results['tones']
            # Push to all open dashboard websockets as a JSON string.
            DashboardHandler.send_updates(json.dumps(tones))
class DashboardHandler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint the dashboard page connects to. Keeps a registry
    of open connections and broadcasts tone updates to all of them."""

    # All currently-open dashboard connections.
    waiters = set()

    def open(self):
        DashboardHandler.waiters.add(self)

    def on_close(self):
        # discard() instead of remove(): closing a connection that was
        # never registered must not raise KeyError.
        DashboardHandler.waiters.discard(self)

    @classmethod
    def send_updates(cls, tones):
        """Best-effort broadcast of a JSON string to every open client."""
        # Iterate over a snapshot: a failed write may trigger on_close,
        # which mutates cls.waiters while we are looping.
        for waiter in list(cls.waiters):
            try:
                waiter.write_message(tones)
            except Exception:
                # Narrowed from a bare `except:` (which would also swallow
                # KeyboardInterrupt/SystemExit); one dead client must not
                # break the broadcast to the others.
                pass
// One smoothie.js-style TimeSeries per Watson tone category; the
// dashboard charts append incoming scores to these series in real time.
var emotions = {
    anger: new TimeSeries(),
    disgust: new TimeSeries(),
    fear: new TimeSeries(),
    joy: new TimeSeries(),
    sadness: new TimeSeries()
}
// Listen for tone updates pushed by DashboardHandler and feed each score
// into its chart series. '{{ server_url }}' is filled in by the server-
// side template when the page is rendered.
var websocket = new WebSocket('{{ server_url }}/dashboard-socket');
websocket.onmessage = function(evt) {
    // Fix: forEach instead of map — we iterate purely for side effects
    // and the mapped array was discarded. Explicit semicolons added.
    JSON.parse(evt.data).forEach(function(emotion) {
        emotions[emotion.tone_id].append(
            new Date().getTime(),
            emotion.score
        );
    });
};
@aaronbassett @nexmodev developer.nexmo.com github.com/nexmo-community
Discover how you can use Artificial Intelligence to perform sentiment analysis of an audio stream, in real-time! In this talk, we’re going to learn how to create a virtual rapporteur. A digital assistant who can join any conference call; record it, and then by using IBM Watson provide participants with a real-time transcript and insights into the overall tone of the call. All pushed to their browser via WebSockets.
The following code examples from the presentation can be tried out live.