|
| 1 | +/** |
| 2 | +* Copyright 2015 IBM Corp. All Rights Reserved. |
| 3 | +* |
| 4 | +* Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | +* you may not use this file except in compliance with the License. |
| 6 | +* You may obtain a copy of the License at |
| 7 | +* |
| 8 | +* http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | +* |
| 10 | +* Unless required by applicable law or agreed to in writing, software |
| 11 | +* distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | +* See the License for the specific language governing permissions and |
| 14 | +* limitations under the License. |
| 15 | +* |
| 16 | +*/ |
| 17 | + |
| 18 | +using IBM.Watson.DeveloperCloud.Connection; |
| 19 | +using IBM.Watson.DeveloperCloud.DataTypes; |
| 20 | +using IBM.Watson.DeveloperCloud.Logging; |
| 21 | +using IBM.Watson.DeveloperCloud.Services.Assistant.v1; |
| 22 | +using IBM.Watson.DeveloperCloud.Services.SpeechToText.v1; |
| 23 | +using IBM.Watson.DeveloperCloud.Utilities; |
| 24 | +using System.Collections; |
| 25 | +using System.Collections.Generic; |
| 26 | +using UnityEngine; |
| 27 | + |
/// <summary>
/// Demonstrates sending custom HTTP headers (e.g. X-Watson-Metadata) to Watson services
/// over both the REST transport (Assistant.Message) and the websocket transport
/// (SpeechToText.StartListening), and reading response headers back in the callbacks.
/// </summary>
public class ExampleCustomHeaders : MonoBehaviour
{
    #region PLEASE SET THESE VARIABLES IN THE INSPECTOR
    [SerializeField]
    private string _assistantUsername;
    [SerializeField]
    private string _assistantPassword;
    [SerializeField]
    private string _assistantUrl;
    [SerializeField]
    private string _assistantWorkspaceId;
    [SerializeField]
    private string _assistantVersionDate;
    [SerializeField]
    private string _speechToTextUsername;
    [SerializeField]
    private string _speechToTextPassword;
    [SerializeField]
    private string _speechToTextUrl;
    #endregion

    // Handle of the running recording coroutine; 0 means "not recording".
    private int _recordingRoutine = 0;
    // null selects the default microphone device.
    private string _microphoneID = null;
    private AudioClip _recording = null;
    // Length of the looping microphone buffer, in seconds.
    private int _recordingBufferSize = 1;
    // Capture sample rate in Hz.
    private int _recordingHZ = 22050;

    private Assistant _assistant;
    // Sample utterance sent to the Assistant workspace.
    // NOTE: fixed typo "winshield" -> "windshield" so the car-dashboard sample
    // workspace's intent matching behaves as intended.
    private string _inputString = "Turn on the windshield wipers";

    private SpeechToText _speechToText;
    // Custom data (including custom request headers) passed to the websocket call; built in Start().
    Dictionary<string, object> speechToTextCustomData = null;

    void Start()
    {
        LogSystem.InstallDefaultReactors();

        #region http custom headers
        // Create credentials and instantiate the Assistant service.
        Credentials assistantCredentials = new Credentials(_assistantUsername, _assistantPassword, _assistantUrl);

        _assistant = new Assistant(assistantCredentials);
        _assistant.VersionDate = _assistantVersionDate;

        Dictionary<string, object> input = new Dictionary<string, object>();
        input.Add("text", _inputString);
        MessageRequest messageRequest = new MessageRequest()
        {
            Input = input,
            AlternateIntents = true
        };

        // Build the customData object: a dictionary of custom request headers is
        // stored under the SDK's CUSTOM_REQUEST_HEADERS key.
        Dictionary<string, object> assistantCustomData = new Dictionary<string, object>();
        Dictionary<string, string> assistantCustomHeaders = new Dictionary<string, string>();
        assistantCustomHeaders.Add("X-Watson-Metadata", "customer_id=some-assistant-customer-id");
        assistantCustomData.Add(Constants.String.CUSTOM_REQUEST_HEADERS, assistantCustomHeaders);

        // Log exactly what will be sent.
        if (assistantCustomData.ContainsKey(Constants.String.CUSTOM_REQUEST_HEADERS))
        {
            Log.Debug("ExampleCustomHeader.Start()", "Assistant custom request headers:");
            foreach (KeyValuePair<string, string> kvp in assistantCustomData[Constants.String.CUSTOM_REQUEST_HEADERS] as Dictionary<string, string>)
            {
                Log.Debug("ExampleCustomHeader.Start()", "\t{0}: {1}", kvp.Key, kvp.Value);
            }
        }

        // Call the service, passing the custom data object so the headers ride along.
        _assistant.Message(OnMessage, OnFail, _assistantWorkspaceId, messageRequest, customData: assistantCustomData);
        #endregion

        #region websocket custom headers
        // Create credentials and instantiate the Speech to Text service.
        Credentials speechToTextCredentials = new Credentials(_speechToTextUsername, _speechToTextPassword, _speechToTextUrl);

        _speechToText = new SpeechToText(speechToTextCredentials);

        // Build the customData object for the websocket call (consumed in the Active setter).
        speechToTextCustomData = new Dictionary<string, object>();
        Dictionary<string, string> speechToTextCustomHeaders = new Dictionary<string, string>();
        speechToTextCustomHeaders.Add("X-Watson-Metadata", "customer_id=some-speech-to-text-customer-id");
        speechToTextCustomData.Add(Constants.String.CUSTOM_REQUEST_HEADERS, speechToTextCustomHeaders);

        // Log exactly what will be sent.
        if (speechToTextCustomData.ContainsKey(Constants.String.CUSTOM_REQUEST_HEADERS))
        {
            Log.Debug("ExampleCustomHeader.Start()", "Speech to text custom request headers:");
            foreach (KeyValuePair<string, string> kvp in speechToTextCustomData[Constants.String.CUSTOM_REQUEST_HEADERS] as Dictionary<string, string>)
            {
                Log.Debug("ExampleCustomHeader.Start()", "\t{0}: {1}", kvp.Key, kvp.Value);
            }
        }

        // Start listening (see the Active setter) and begin feeding microphone audio.
        Active = true;

        StartRecording();
        #endregion
    }

    /// <summary>
    /// Assistant message callback. Logs the raw JSON response and any response headers.
    /// </summary>
    private void OnMessage(object response, Dictionary<string, object> customData)
    {
        // Guard the "json" key: avoids KeyNotFoundException if the SDK did not attach it.
        if (customData != null && customData.ContainsKey("json"))
        {
            Log.Debug("ExampleCustomHeader.OnMessage()", "Response: {0}", customData["json"].ToString());
        }

        if (customData != null && customData.ContainsKey(Constants.String.RESPONSE_HEADERS))
        {
            Log.Debug("ExampleCustomHeader.OnMessage()", "Response headers:");

            foreach (KeyValuePair<string, string> kvp in customData[Constants.String.RESPONSE_HEADERS] as Dictionary<string, string>)
            {
                Log.Debug("ExampleCustomHeader.OnMessage()", "\t{0}: {1}", kvp.Key, kvp.Value);
            }
        }
    }

    /// <summary>
    /// Speech to Text recognition callback. Logs the raw JSON response and any response headers.
    /// </summary>
    private void OnRecognize(SpeechRecognitionEvent results, Dictionary<string, object> customData)
    {
        if (customData != null)
        {
            // Guard the "json" key: avoids KeyNotFoundException if the SDK did not attach it.
            if (customData.ContainsKey("json"))
            {
                Log.Debug("ExampleCustomHeader.OnRecognize()", "Response: {0}", customData["json"].ToString());
            }

            if (customData.ContainsKey(Constants.String.RESPONSE_HEADERS))
            {
                Log.Debug("ExampleCustomHeader.OnRecognize()", "Response headers:");

                foreach (KeyValuePair<string, string> kvp in customData[Constants.String.RESPONSE_HEADERS] as Dictionary<string, string>)
                {
                    Log.Debug("ExampleCustomHeader.OnRecognize()", "\t{0}: {1}", kvp.Key, kvp.Value);
                }
            }
        }
    }

    /// <summary>
    /// Failure callback for the Assistant call. Logs the error and any JSON payload.
    /// </summary>
    private void OnFail(RESTConnector.Error error, Dictionary<string, object> customData)
    {
        // Guard both arguments: a failure path may not carry a "json" entry, and
        // indexing it unguarded would throw from inside the error handler.
        if (customData != null && customData.ContainsKey("json"))
        {
            Log.Debug("ExampleCustomHeader.OnFail()", "Response: {0}", customData["json"].ToString());
        }
        Log.Error("ExampleCustomHeader.OnFail()", "Error received: {0}", error.ToString());
    }

    /// <summary>
    /// Gets or sets whether the Speech to Text websocket session is listening.
    /// Setting true configures the service and opens the stream with the custom
    /// headers built in Start(); setting false closes it.
    /// </summary>
    public bool Active
    {
        get { return _speechToText.IsListening; }
        set
        {
            if (value && !_speechToText.IsListening)
            {
                _speechToText.DetectSilence = true;
                _speechToText.EnableWordConfidence = true;
                _speechToText.EnableTimestamps = true;
                _speechToText.SilenceThreshold = 0.01f;
                _speechToText.MaxAlternatives = 0;
                _speechToText.EnableInterimResults = true;
                _speechToText.OnError = OnError;
                // -1 disables the service-side inactivity timeout.
                _speechToText.InactivityTimeout = -1;
                _speechToText.ProfanityFilter = false;
                _speechToText.SmartFormatting = true;
                _speechToText.SpeakerLabels = false;
                _speechToText.WordAlternativesThreshold = null;
                // The custom headers are delivered via this customData object.
                _speechToText.StartListening(OnRecognize, customData: speechToTextCustomData);
            }
            else if (!value && _speechToText.IsListening)
            {
                _speechToText.StopListening();
            }
        }
    }

    /// <summary>
    /// Starts the microphone-capture coroutine if it is not already running.
    /// </summary>
    private void StartRecording()
    {
        if (_recordingRoutine == 0)
        {
            UnityObjectUtil.StartDestroyQueue();
            _recordingRoutine = Runnable.Run(RecordingHandler());
        }
    }

    /// <summary>
    /// Stops the microphone and the capture coroutine if one is running.
    /// </summary>
    private void StopRecording()
    {
        if (_recordingRoutine != 0)
        {
            Microphone.End(_microphoneID);
            Runnable.Stop(_recordingRoutine);
            _recordingRoutine = 0;
        }
    }

    /// <summary>
    /// Speech to Text error callback: stops listening and logs the error.
    /// </summary>
    private void OnError(string error)
    {
        Active = false;

        Log.Debug("ExampleCustomHeader.OnError()", "Error! {0}", error);
    }

    /// <summary>
    /// Coroutine that records from the microphone into a looping buffer and streams
    /// it to Speech to Text in half-buffer chunks (double-buffering).
    /// </summary>
    private IEnumerator RecordingHandler()
    {
        // Microphone.devices is a string[]; join it so the log shows the actual
        // device names instead of the array's type name.
        Log.Debug("ExampleCustomHeader.RecordingHandler()", "devices: {0}", string.Join(", ", Microphone.devices));
        _recording = Microphone.Start(_microphoneID, true, _recordingBufferSize, _recordingHZ);
        yield return null;      // let _recordingRoutine get set..

        if (_recording == null)
        {
            StopRecording();
            yield break;
        }

        bool bFirstBlock = true;
        int midPoint = _recording.samples / 2;
        float[] samples = null;

        while (_recordingRoutine != 0 && _recording != null)
        {
            int writePos = Microphone.GetPosition(_microphoneID);
            if (writePos > _recording.samples || !Microphone.IsRecording(_microphoneID))
            {
                Log.Error("ExampleCustomHeader.RecordingHandler()", "Microphone disconnected.");

                StopRecording();
                yield break;
            }

            if ((bFirstBlock && writePos >= midPoint)
              || (!bFirstBlock && writePos < midPoint))
            {
                // A half-buffer has been fully recorded; wrap it in an AudioData
                // and hand it to the Speech to Text websocket.
                samples = new float[midPoint];
                _recording.GetData(samples, bFirstBlock ? 0 : midPoint);

                AudioData record = new AudioData();
                record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
                record.Clip = AudioClip.Create("Recording", midPoint, _recording.channels, _recordingHZ, false);
                record.Clip.SetData(samples, 0);

                _speechToText.OnListen(record);

                bFirstBlock = !bFirstBlock;
            }
            else
            {
                // Calculate the number of samples remaining until the next half-buffer
                // is ready, and wait the amount of time it will take to record them.
                int remaining = bFirstBlock ? (midPoint - writePos) : (_recording.samples - writePos);
                float timeRemaining = (float)remaining / (float)_recordingHZ;

                yield return new WaitForSeconds(timeRemaining);
            }
        }

        yield break;
    }
}
0 commit comments