Initial Commit
This commit is contained in:
45
C#/api/QuickStart/QuickStart.cs
Normal file
45
C#/api/QuickStart/QuickStart.cs
Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright (c) 2017 Google Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
|
||||
* use this file except in compliance with the License. You may obtain a copy of
|
||||
* the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
// [START speech_quickstart]
|
||||
|
||||
using Google.Cloud.Speech.V1;
|
||||
using System;
|
||||
|
||||
namespace GoogleCloudSamples
{
    /// <summary>
    /// Minimal Speech API sample: transcribes the bundled "audio.raw" file
    /// (16-bit linear PCM at 16000 Hz) and writes every transcript
    /// alternative to stdout.
    /// </summary>
    public class QuickStart
    {
        public static void Main(string[] args)
        {
            // Client authenticates via application default credentials.
            SpeechClient client = SpeechClient.Create();

            // Describe the bundled audio file so the service can decode it.
            RecognitionConfig config = new RecognitionConfig()
            {
                Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode = "en",
            };
            RecognitionAudio audio = RecognitionAudio.FromFile("audio.raw");

            // Synchronous (blocking) recognition; fine for short clips.
            RecognizeResponse response = client.Recognize(config, audio);

            // A response holds results, each with one or more alternatives.
            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
        }
    }
}
|
||||
// [END speech_quickstart]
|
||||
12
C#/api/QuickStart/QuickStart.csproj
Normal file
12
C#/api/QuickStart/QuickStart.csproj
Normal file
@@ -0,0 +1,12 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
|
||||
<PropertyGroup>
|
||||
<OutputType>Exe</OutputType>
|
||||
<TargetFramework>netcoreapp2.0</TargetFramework>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<PackageReference Include="Google.Cloud.Speech.V1" Version="1.0.0" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<Content Include="audio.raw" CopyToOutputDirectory="Always" />
|
||||
</ItemGroup>
|
||||
</Project>
|
||||
BIN
C#/api/QuickStart/audio.raw
Normal file
BIN
C#/api/QuickStart/audio.raw
Normal file
Binary file not shown.
146
C#/api/README.md
Normal file
146
C#/api/README.md
Normal file
@@ -0,0 +1,146 @@
|
||||
# Google Cloud Speech API Samples
|
||||
|
||||
These samples show how to use the [Google Cloud Speech API](http://cloud.google.com/speech)
|
||||
to transcribe audio files, using the [Google API Client Library for
|
||||
.NET](https://developers.google.com/api-client-library/dotnet/).
|
||||
|
||||
This sample requires [.NET Core 2.0](
|
||||
https://www.microsoft.com/net/core) or later. That means using
|
||||
[Visual Studio 2017](
|
||||
https://www.visualstudio.com/), or the command line. Visual Studio 2015 users
|
||||
can use [this older sample](
|
||||
https://github.com/GoogleCloudPlatform/dotnet-docs-samples/tree/vs2015/speech/api).
|
||||
|
||||
You can also build, run and edit this sample in your web browser using Google Cloud Shell.
|
||||
The instructions on how to do this are in the [Build and Run with Google Cloud
|
||||
Shell](#cloudshell) section below.
|
||||
|
||||
## Build and Run
|
||||
|
||||
1. **Follow the instructions in the [root README](../../README.md)**.
|
||||
|
||||
4. Enable APIs for your project.
|
||||
[Click here](https://console.cloud.google.com/flows/enableapi?apiid=speech.googleapis.com&showconfirmation=true)
|
||||
to visit Cloud Platform Console and enable the Google Cloud Speech API.
|
||||
|
||||
9. From a PowerShell command line, run the QuickStart sample:
|
||||
```
|
||||
PS C:\...\dotnet-docs-samples\speech\api\QuickStart> dotnet restore
|
||||
PS C:\...\dotnet-docs-samples\speech\api\QuickStart> dotnet run
|
||||
how old is the Brooklyn Bridge
|
||||
```
|
||||
|
||||
9. And run Recognize for more examples:
|
||||
```
|
||||
PS C:\...\dotnet-docs-samples\speech\api\Recognize> dotnet restore
|
||||
PS C:\...\dotnet-docs-samples\speech\api\Recognize> dotnet run
|
||||
Recognize 1.0.0
|
||||
Copyright (C) 2017 Recognize
|
||||
|
||||
ERROR(S):
|
||||
No verb selected.
|
||||
|
||||
sync Detects speech in an audio file.
|
||||
|
||||
async Creates a job to detect speech in an audio file, and waits for the job to complete.
|
||||
|
||||
stream Detects speech in an audio file by streaming it to the Speech API.
|
||||
|
||||
listen Detects speech in a microphone input stream.
|
||||
|
||||
rec Detects speech in an audio file. Supports other file formats.
|
||||
|
||||
sync-creds Detects speech in an audio file.
|
||||
|
||||
with-context Detects speech in an audio file. Add additional context on stdin.
|
||||
|
||||
help Display more information on a specific command.
|
||||
|
||||
version Display version information.
|
||||
|
||||
PS C:\...\dotnet-docs-samples\speech\api\Recognize> dotnet run listen 3
|
||||
Speak now.
|
||||
test
|
||||
testing
|
||||
testing one
|
||||
testing
|
||||
one
|
||||
testing
|
||||
one two
|
||||
testing one
|
||||
two
|
||||
testing
|
||||
1 2 3
|
||||
testing 1 2 3
|
||||
PS C:\...\dotnet-docs-samples\speech\api\Recognize>
|
||||
```
|
||||
|
||||
## <a name="cloudshell"></a>Build and Run with Google Cloud Shell
|
||||
|
||||
1. Follow the instructions in the [root README](https://github.com/GoogleCloudPlatform/dotnet-docs-samples/blob/master/README.md).
|
||||
1. Enable APIs for your project. <a target='_blank' href="https://console.cloud.google.com/flows/enableapi?apiid=speech.googleapis.com&showconfirmation=true">Click here</a> to visit Cloud Platform Console and enable the Google Cloud Speech API.
|
||||
1. <a target='_blank' href="https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/dotnet-docs-samples&page=editor&open_in_viewer=README.md&working_dir=speech/api">Open the sample with Cloud Shell</a>.
|
||||
1. Upload your JSON key file to Cloud Shell:
|
||||
* Use the Cloud Shell file browser to select the **dotnet-docs-samples** folder.
|
||||
* Use the **File** menu to select **Import > File or Zip Archive**
|
||||
* Select and import the JSON key that was downloaded as instructed in the [root README](https://github.com/GoogleCloudPlatform/dotnet-docs-samples/blob/master/README.md).
|
||||

|
||||
|
||||
1. Set the environment variable `GOOGLE_APPLICATION_CREDENTIALS` to the path of the JSON key that was just imported. In Cloud Shell console, the command will look like this:
|
||||
|
||||
```
|
||||
:~/$ export GOOGLE_APPLICATION_CREDENTIALS=../../../your-project-id-dea9fa230eae3.json
|
||||
```
|
||||
|
||||
1. In Cloud Shell console, run the QuickStart sample:
|
||||
|
||||
```
|
||||
:~/dotnet-docs-samples/speech/api$ cd QuickStart
|
||||
:~/dotnet-docs-samples/speech/api/QuickStart$ dotnet run
|
||||
how old is the Brooklyn Bridge
|
||||
```
|
||||
|
||||
1. And run Recognize for more examples:
|
||||
|
||||
```
|
||||
:~/dotnet-docs-samples/speech/api/QuickStart$ cd ../Recognize
|
||||
:~/dotnet-docs-samples/speech/api/Recognize$ dotnet run
|
||||
Recognize 1.0.0
|
||||
Copyright (C) 2017 Recognize
|
||||
|
||||
ERROR(S):
|
||||
No verb selected.
|
||||
|
||||
sync Detects speech in an audio file.
|
||||
|
||||
async Creates a job to detect speech in an audio file, and waits for the job to complete.
|
||||
|
||||
stream Detects speech in an audio file by streaming it to the Speech API.
|
||||
|
||||
listen Detects speech in a microphone input stream.
|
||||
|
||||
rec Detects speech in an audio file. Supports other file formats.
|
||||
|
||||
sync-creds Detects speech in an audio file.
|
||||
|
||||
with-context Detects speech in an audio file. Add additional context on stdin.
|
||||
|
||||
help Display more information on a specific command.
|
||||
|
||||
version Display version information.
|
||||
|
||||
:~/dotnet-docs-samples/speech/api/Recognize$ dotnet run sync ../resources/audio2.raw
|
||||
the rain in Spain stays mainly on the plain
|
||||
```
|
||||
|
||||
## Contributing changes
|
||||
|
||||
* See [CONTRIBUTING.md](../../CONTRIBUTING.md)
|
||||
|
||||
## Licensing
|
||||
|
||||
* See [LICENSE](../../LICENSE)
|
||||
|
||||
## Testing
|
||||
|
||||
* See [TESTING.md](../../TESTING.md)
|
||||
502
C#/api/Recognize/Recognize.cs
Normal file
502
C#/api/Recognize/Recognize.cs
Normal file
@@ -0,0 +1,502 @@
|
||||
/*
|
||||
* Copyright (c) 2017 Google Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
|
||||
* use this file except in compliance with the License. You may obtain a copy of
|
||||
* the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
using CommandLine;
|
||||
using Google.Apis.Auth.OAuth2;
|
||||
using Google.Cloud.Speech.V1;
|
||||
using Grpc.Auth;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace GoogleCloudSamples
|
||||
{
|
||||
/// <summary>
/// Base command-line options for verbs that read a local audio file.
/// </summary>
class Options
{
    // Positional argument 0: per the HelpText, the file must be
    // Linear16-encoded with a 16000 Hz sample rate.
    [Value(0, HelpText = "A path to a sound file. Encoding must be "
        + "Linear16 with a sample rate of 16000.", Required = true)]
    public string FilePath { get; set; }
}
|
||||
|
||||
/// <summary>
/// Command-line options for verbs that accept either a local file path or
/// a Google Cloud Storage URI (gs://my-bucket/my-object).
/// </summary>
class StorageOptions
{
    // Positional argument 0: local path or gs:// URI; audio must be
    // Linear16 at 16000 Hz.
    [Value(0, HelpText = "A path to a sound file. "
        + "Can be a local file path or a Google Cloud Storage path like "
        + "gs://my-bucket/my-object. "
        + "Encoding must be "
        + "Linear16 with a sample rate of 16000.", Required = true)]
    public string FilePath { get; set; }
}
|
||||
|
||||
/// <summary>
/// Options for the "sync" verb: synchronous recognition of a local file
/// or Cloud Storage object.
/// </summary>
[Verb("sync", HelpText = "Detects speech in an audio file.")]
class SyncOptions : StorageOptions
{
    // -w: also print per-word start/end time offsets.
    [Option('w', HelpText = "Report the time offsets of individual words.")]
    public bool EnableWordTimeOffsets { get; set; }
}
|
||||
|
||||
/// <summary>
/// Options for the "with-context" verb; hint phrases are read from stdin.
/// </summary>
[Verb("with-context", HelpText = "Detects speech in an audio file."
    + " Add additional context on stdin.")]
class OptionsWithContext : StorageOptions { }
|
||||
|
||||
/// <summary>
/// Options for the "async" verb: long-running recognition that polls the
/// operation until it completes.
/// </summary>
[Verb("async", HelpText = "Creates a job to detect speech in an audio "
    + "file, and waits for the job to complete.")]
class AsyncOptions : StorageOptions
{
    // -w: also print per-word start/end time offsets.
    [Option('w', HelpText = "Report the time offsets of individual words.")]
    public bool EnableWordTimeOffsets { get; set; }
}
|
||||
|
||||
/// <summary>
/// Options for the "sync-creds" verb: synchronous recognition that
/// authenticates with an explicit JSON key file rather than application
/// default credentials.
/// </summary>
[Verb("sync-creds", HelpText = "Detects speech in an audio file.")]
class SyncOptionsWithCreds
{
    // Positional argument 0: local audio file (Linear16, 16000 Hz).
    [Value(0, HelpText = "A path to a sound file. Encoding must be "
        + "Linear16 with a sample rate of 16000.", Required = true)]
    public string FilePath { get; set; }

    // Positional argument 1: path to the service-account JSON key.
    [Value(1, HelpText = "Path to Google credentials json file.", Required = true)]
    public string CredentialsFilePath { get; set; }
}
|
||||
|
||||
/// <summary>
/// Options for the "stream" verb: streams a local file to the API in
/// chunks. Local files only (inherits from Options, not StorageOptions).
/// </summary>
[Verb("stream", HelpText = "Detects speech in an audio file by streaming "
    + "it to the Speech API.")]
class StreamingOptions : Options { }
|
||||
|
||||
/// <summary>
/// Options for the "listen" verb: recognizes speech from the microphone.
/// </summary>
[Verb("listen", HelpText = "Detects speech in a microphone input stream.")]
class ListenOptions
{
    // Optional positional argument 0: recording duration, default 3 s.
    [Value(0, HelpText = "Number of seconds to listen for.", Required = false)]
    public int Seconds { get; set; } = 3;
}
|
||||
|
||||
/// <summary>
/// Options for the "rec" verb: like "sync" but lets the caller override
/// sample rate and encoding for other file formats.
/// </summary>
[Verb("rec", HelpText = "Detects speech in an audio file. Supports other file formats.")]
class RecOptions : Options
{
    // -b: sample rate passed to RecognitionConfig.SampleRateHertz.
    [Option('b', Default = 16000, HelpText = "Sample rate in bits per second.")]
    public int BitRate { get; set; }

    // -e: audio encoding, defaults to Linear16.
    [Option('e', Default = RecognitionConfig.Types.AudioEncoding.Linear16,
        HelpText = "Audio file encoding format.")]
    public RecognitionConfig.Types.AudioEncoding Encoding { get; set; }
}
|
||||
|
||||
|
||||
/// <summary>
/// Command-line samples for the Google Cloud Speech API. Each verb parsed
/// in <see cref="Main"/> dispatches to one of the recognition methods
/// below. Every method prints transcripts to stdout and returns a value
/// that Main uses as the process exit code.
/// </summary>
public class Recognize
{
    /// <summary>
    /// Synchronously recognizes speech in a local audio file using a
    /// caller-supplied sample rate and encoding (the "rec" verb).
    /// </summary>
    /// <param name="filePath">Path to a local audio file.</param>
    /// <param name="bitRate">Sample rate of the audio, in hertz.</param>
    /// <param name="encoding">Encoding format of the audio file.</param>
    /// <returns>0 on success.</returns>
    static object Rec(string filePath, int bitRate,
        RecognitionConfig.Types.AudioEncoding encoding)
    {
        var speech = SpeechClient.Create();
        var response = speech.Recognize(new RecognitionConfig()
        {
            Encoding = encoding,
            SampleRateHertz = bitRate,
            LanguageCode = "en",
        }, RecognitionAudio.FromFile(filePath));
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine(alternative.Transcript);
            }
        }
        return 0;
    }

    // [START speech_sync_recognize]
    /// <summary>
    /// Synchronously recognizes speech in a local audio file. The file
    /// must be Linear16-encoded with a 16000 Hz sample rate.
    /// </summary>
    /// <param name="filePath">Path to a local audio file.</param>
    /// <returns>0 on success.</returns>
    static object SyncRecognize(string filePath)
    {
        var speech = SpeechClient.Create();
        var response = speech.Recognize(new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en",
        }, RecognitionAudio.FromFile(filePath));
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine(alternative.Transcript);
            }
        }
        return 0;
    }
    // [END speech_sync_recognize]

    // [START speech_sync_recognize_words]
    /// <summary>
    /// Synchronously recognizes speech in a local Linear16 16000 Hz audio
    /// file and also prints the start/end time offsets of each word.
    /// </summary>
    /// <param name="filePath">Path to a local audio file.</param>
    /// <returns>0 on success.</returns>
    static object SyncRecognizeWords(string filePath)
    {
        var speech = SpeechClient.Create();
        var response = speech.Recognize(new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en",
            // Ask the service to annotate each word with timing info.
            EnableWordTimeOffsets = true,
        }, RecognitionAudio.FromFile(filePath));
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine($"Transcript: {alternative.Transcript}");
                Console.WriteLine("Word details:");
                Console.WriteLine($" Word count:{alternative.Words.Count}");
                foreach (var item in alternative.Words)
                {
                    Console.WriteLine($"  {item.Word}");
                    Console.WriteLine($"    WordStartTime: {item.StartTime}");
                    Console.WriteLine($"    WordEndTime: {item.EndTime}");
                }
            }
        }
        return 0;
    }
    // [END speech_sync_recognize_words]

    /// <summary>
    /// Reads a list of phrases from stdin, one per line, stopping at the
    /// first blank line.
    /// </summary>
    /// <returns>The phrases entered, each trimmed of whitespace.</returns>
    static List<string> ReadPhrases()
    {
        Console.Write("Reading phrases from stdin. Finish with blank line.\n> ");
        var phrases = new List<string>();
        string line = Console.ReadLine();
        while (!string.IsNullOrWhiteSpace(line))
        {
            phrases.Add(line.Trim());
            Console.Write("> ");
            line = Console.ReadLine();
        }
        return phrases;
    }

    /// <summary>
    /// Recognizes speech with additional hint phrases that bias the
    /// recognizer (the "with-context" verb). Accepts a local path or a
    /// gs:// Cloud Storage URI.
    /// </summary>
    /// <param name="filePath">Local file path or gs:// URI.</param>
    /// <param name="phrases">Hint phrases passed as a SpeechContext.</param>
    /// <returns>0 on success.</returns>
    static object RecognizeWithContext(string filePath, IEnumerable<string> phrases)
    {
        var speech = SpeechClient.Create();
        var config = new RecognitionConfig()
        {
            SpeechContexts = { new SpeechContext() { Phrases = { phrases } } },
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en",
        };
        var audio = IsStorageUri(filePath) ?
            RecognitionAudio.FromStorageUri(filePath) :
            RecognitionAudio.FromFile(filePath);
        var response = speech.Recognize(config, audio);
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine(alternative.Transcript);
            }
        }
        return 0;
    }

    /// <summary>
    /// Synchronously recognizes speech, authenticating with an explicit
    /// service-account JSON key instead of application default
    /// credentials (the "sync-creds" verb).
    /// </summary>
    /// <param name="filePath">Path to a local audio file.</param>
    /// <param name="credentialsFilePath">Path to the JSON key file.</param>
    /// <returns>0 on success.</returns>
    static object SyncRecognizeWithCredentials(string filePath, string credentialsFilePath)
    {
        GoogleCredential googleCredential;
        // The using ensures the key file handle is released promptly.
        using (Stream m = new FileStream(credentialsFilePath, FileMode.Open))
            googleCredential = GoogleCredential.FromStream(m);
        var channel = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.Host,
            googleCredential.ToChannelCredentials());
        var speech = SpeechClient.Create(channel);
        var response = speech.Recognize(new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en",
        }, RecognitionAudio.FromFile(filePath));
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine(alternative.Transcript);
            }
        }
        return 0;
    }

    // [START speech_sync_recognize_gcs]
    /// <summary>
    /// Synchronously recognizes speech in a Linear16 16000 Hz audio file
    /// stored in Google Cloud Storage.
    /// </summary>
    /// <param name="storageUri">gs://bucket/object URI of the audio.</param>
    /// <returns>0 on success.</returns>
    static object SyncRecognizeGcs(string storageUri)
    {
        var speech = SpeechClient.Create();
        var response = speech.Recognize(new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en",
        }, RecognitionAudio.FromStorageUri(storageUri));
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine(alternative.Transcript);
            }
        }
        return 0;
    }
    // [END speech_sync_recognize_gcs]

    // [START speech_async_recognize]
    /// <summary>
    /// Starts a long-running recognition operation for a local audio file
    /// and blocks until the operation completes.
    /// </summary>
    /// <param name="filePath">Path to a local audio file.</param>
    /// <returns>0 on success.</returns>
    static object LongRunningRecognize(string filePath)
    {
        var speech = SpeechClient.Create();
        var longOperation = speech.LongRunningRecognize(new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en",
        }, RecognitionAudio.FromFile(filePath));
        // Poll until the server-side operation finishes.
        longOperation = longOperation.PollUntilCompleted();
        var response = longOperation.Result;
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine(alternative.Transcript);
            }
        }
        return 0;
    }
    // [END speech_async_recognize]

    // [START speech_async_recognize_gcs]
    /// <summary>
    /// Starts a long-running recognition operation for a Cloud Storage
    /// object and blocks until it completes.
    /// </summary>
    /// <param name="storageUri">gs://bucket/object URI of the audio.</param>
    /// <returns>0 on success.</returns>
    static object AsyncRecognizeGcs(string storageUri)
    {
        var speech = SpeechClient.Create();
        var longOperation = speech.LongRunningRecognize(new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en",
        }, RecognitionAudio.FromStorageUri(storageUri));
        longOperation = longOperation.PollUntilCompleted();
        var response = longOperation.Result;
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine($"Transcript: {alternative.Transcript}");
            }
        }
        return 0;
    }
    // [END speech_async_recognize_gcs]

    // [START speech_async_recognize_gcs_words]
    /// <summary>
    /// Long-running recognition of a Cloud Storage object, printing
    /// per-word start/end time offsets with each transcript.
    /// </summary>
    /// <param name="storageUri">gs://bucket/object URI of the audio.</param>
    /// <returns>0 on success.</returns>
    static object AsyncRecognizeGcsWords(string storageUri)
    {
        var speech = SpeechClient.Create();
        var longOperation = speech.LongRunningRecognize(new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000,
            LanguageCode = "en",
            EnableWordTimeOffsets = true,
        }, RecognitionAudio.FromStorageUri(storageUri));
        longOperation = longOperation.PollUntilCompleted();
        var response = longOperation.Result;
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                Console.WriteLine($"Transcript: {alternative.Transcript}");
                Console.WriteLine("Word details:");
                Console.WriteLine($" Word count:{alternative.Words.Count}");
                foreach (var item in alternative.Words)
                {
                    Console.WriteLine($"  {item.Word}");
                    Console.WriteLine($"    WordStartTime: {item.StartTime}");
                    Console.WriteLine($"    WordEndTime: {item.EndTime}");
                }
            }
        }
        return 0;
    }
    // [END speech_async_recognize_gcs_words]

    /// <summary>
    /// Stream the content of the file to the API in 32kb chunks.
    /// </summary>
    /// <param name="filePath">Path to a local audio file.</param>
    /// <returns>0 on success.</returns>
    // [START speech_streaming_recognize]
    static async Task<object> StreamingRecognizeAsync(string filePath)
    {
        var speech = SpeechClient.Create();
        var streamingCall = speech.StreamingRecognize();
        // Write the initial request with the config.
        await streamingCall.WriteAsync(
            new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                        RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode = "en",
                    },
                    // Also report partial hypotheses as they form.
                    InterimResults = true,
                }
            });
        // Print responses as they arrive, on a background task.
        Task printResponses = Task.Run(async () =>
        {
            while (await streamingCall.ResponseStream.MoveNext(
                default(CancellationToken)))
            {
                foreach (var result in streamingCall.ResponseStream
                    .Current.Results)
                {
                    foreach (var alternative in result.Alternatives)
                    {
                        Console.WriteLine(alternative.Transcript);
                    }
                }
            }
        });
        // Stream the file content to the API. Write 2 32kb chunks per
        // second.
        using (FileStream fileStream = new FileStream(filePath, FileMode.Open))
        {
            var buffer = new byte[32 * 1024];
            int bytesRead;
            while ((bytesRead = await fileStream.ReadAsync(
                buffer, 0, buffer.Length)) > 0)
            {
                await streamingCall.WriteAsync(
                    new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                            .CopyFrom(buffer, 0, bytesRead),
                    });
                await Task.Delay(500);
            }
        }
        // Tell the service no more audio is coming, then wait for the
        // reader task to drain the remaining responses.
        await streamingCall.WriteCompleteAsync();
        await printResponses;
        return 0;
    }
    // [END speech_streaming_recognize]

    // [START speech_streaming_mic_recognize]
    /// <summary>
    /// Recognizes speech from the default microphone for the given number
    /// of seconds, printing interim and final transcripts as they arrive.
    /// </summary>
    /// <param name="seconds">How long to record, in seconds.</param>
    /// <returns>0 on success; -1 when no microphone is present.</returns>
    static async Task<object> StreamingMicRecognizeAsync(int seconds)
    {
        if (NAudio.Wave.WaveIn.DeviceCount < 1)
        {
            Console.WriteLine("No microphone!");
            return -1;
        }
        var speech = SpeechClient.Create();
        var streamingCall = speech.StreamingRecognize();
        // Write the initial request with the config.
        await streamingCall.WriteAsync(
            new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                        RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode = "en",
                    },
                    InterimResults = true,
                }
            });
        // Print responses as they arrive.
        Task printResponses = Task.Run(async () =>
        {
            while (await streamingCall.ResponseStream.MoveNext(
                default(CancellationToken)))
            {
                foreach (var result in streamingCall.ResponseStream
                    .Current.Results)
                {
                    foreach (var alternative in result.Alternatives)
                    {
                        Console.WriteLine(alternative.Transcript);
                    }
                }
            }
        });
        // Read from the microphone and stream to API.
        // writeLock + writeMore prevent writing to the gRPC stream after
        // WriteCompleteAsync: NAudio may still raise DataAvailable briefly
        // after StopRecording.
        object writeLock = new object();
        bool writeMore = true;
        var waveIn = new NAudio.Wave.WaveInEvent();
        waveIn.DeviceNumber = 0;
        // 16 kHz mono, matching the RecognitionConfig above.
        waveIn.WaveFormat = new NAudio.Wave.WaveFormat(16000, 1);
        waveIn.DataAvailable +=
            (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore) return;
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                        {
                            AudioContent = Google.Protobuf.ByteString
                                .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                        }).Wait();
                }
            };
        waveIn.StartRecording();
        Console.WriteLine("Speak now.");
        await Task.Delay(TimeSpan.FromSeconds(seconds));
        // Stop recording and shut down.
        waveIn.StopRecording();
        lock (writeLock) writeMore = false;
        await streamingCall.WriteCompleteAsync();
        await printResponses;
        return 0;
    }
    // [END speech_streaming_mic_recognize]

    /// <summary>
    /// Returns true when the path looks like a Google Cloud Storage URI
    /// ("gs://..." prefix, case-insensitive). The previous implementation
    /// used s.Substring(0, 4), which threw ArgumentOutOfRangeException
    /// for paths shorter than four characters (and NullReferenceException
    /// for null); StartsWith handles both safely.
    /// </summary>
    static bool IsStorageUri(string s) =>
        s != null && s.StartsWith("gs:/", StringComparison.OrdinalIgnoreCase);

    /// <summary>
    /// Entry point: parses the command-line verb and dispatches to the
    /// matching recognition method.
    /// </summary>
    /// <param name="args">Command-line arguments.</param>
    /// <returns>Process exit code: 0 on success, 1 on a parse error.</returns>
    public static int Main(string[] args)
    {
        return (int)Parser.Default.ParseArguments<
            SyncOptions, AsyncOptions,
            StreamingOptions, ListenOptions,
            RecOptions, SyncOptionsWithCreds,
            OptionsWithContext
            >(args).MapResult(
            // "sync": GCS URIs go straight to the GCS variant; local files
            // optionally print word offsets.
            (SyncOptions opts) => IsStorageUri(opts.FilePath) ?
                SyncRecognizeGcs(opts.FilePath) : opts.EnableWordTimeOffsets ?
                SyncRecognizeWords(opts.FilePath) : SyncRecognize(opts.FilePath),
            // "async": word offsets are only supported for GCS inputs here.
            (AsyncOptions opts) => IsStorageUri(opts.FilePath) ?
                (opts.EnableWordTimeOffsets ? AsyncRecognizeGcsWords(opts.FilePath)
                : AsyncRecognizeGcs(opts.FilePath))
                : LongRunningRecognize(opts.FilePath),
            (StreamingOptions opts) => StreamingRecognizeAsync(opts.FilePath).Result,
            (ListenOptions opts) => StreamingMicRecognizeAsync(opts.Seconds).Result,
            (RecOptions opts) => Rec(opts.FilePath, opts.BitRate, opts.Encoding),
            (SyncOptionsWithCreds opts) => SyncRecognizeWithCredentials(
                opts.FilePath, opts.CredentialsFilePath),
            (OptionsWithContext opts) => RecognizeWithContext(opts.FilePath, ReadPhrases()),
            errs => 1);
    }
}
|
||||
}
|
||||
12
C#/api/Recognize/Recognize.csproj
Normal file
12
C#/api/Recognize/Recognize.csproj
Normal file
@@ -0,0 +1,12 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
|
||||
<PropertyGroup>
|
||||
<OutputType>Exe</OutputType>
|
||||
<TargetFramework>netcoreapp2.0</TargetFramework>
|
||||
<StartupObject>GoogleCloudSamples.Recognize</StartupObject>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<PackageReference Include="CommandLineParser" Version="2.1.1-beta" />
|
||||
<PackageReference Include="Google.Cloud.Speech.V1" Version="1.0.0" />
|
||||
<PackageReference Include="NAudio" Version="1.8.3" />
|
||||
</ItemGroup>
|
||||
</Project>
|
||||
64
C#/api/Speech.sln
Normal file
64
C#/api/Speech.sln
Normal file
@@ -0,0 +1,64 @@
|
||||
Microsoft Visual Studio Solution File, Format Version 12.00
|
||||
# Visual Studio 15
|
||||
VisualStudioVersion = 15.0.27130.2027
|
||||
MinimumVisualStudioVersion = 15.0.26124.0
|
||||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Recognize", "Recognize\Recognize.csproj", "{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}"
|
||||
EndProject
|
||||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "QuickStart", "QuickStart\QuickStart.csproj", "{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}"
|
||||
EndProject
|
||||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SpeechTest", "SpeechTest\SpeechTest.csproj", "{AB5C126E-D070-4DB0-BF08-7524DE4EF978}"
|
||||
EndProject
|
||||
Global
|
||||
GlobalSection(SolutionConfigurationPlatforms) = preSolution
|
||||
Debug|Any CPU = Debug|Any CPU
|
||||
Debug|x64 = Debug|x64
|
||||
Debug|x86 = Debug|x86
|
||||
Release|Any CPU = Release|Any CPU
|
||||
Release|x64 = Release|x64
|
||||
Release|x86 = Release|x86
|
||||
EndGlobalSection
|
||||
GlobalSection(ProjectConfigurationPlatforms) = postSolution
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Release|x64.Build.0 = Release|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{CEC87C7A-5F4F-4DEC-ACF7-F8195011D960}.Release|x86.Build.0 = Release|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Release|x64.Build.0 = Release|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{2C52D25D-92E5-42F5-8398-9F3AF0BFADC2}.Release|x86.Build.0 = Release|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Debug|x64.ActiveCfg = Debug|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Debug|x64.Build.0 = Debug|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Debug|x86.ActiveCfg = Debug|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Debug|x86.Build.0 = Debug|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Release|x64.ActiveCfg = Release|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Release|x64.Build.0 = Release|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Release|x86.ActiveCfg = Release|Any CPU
|
||||
{AB5C126E-D070-4DB0-BF08-7524DE4EF978}.Release|x86.Build.0 = Release|Any CPU
|
||||
EndGlobalSection
|
||||
GlobalSection(SolutionProperties) = preSolution
|
||||
HideSolutionNode = FALSE
|
||||
EndGlobalSection
|
||||
GlobalSection(ExtensibilityGlobals) = postSolution
|
||||
SolutionGuid = {21E7414C-BDC0-4CE2-97A1-3664CE3BCFB2}
|
||||
EndGlobalSection
|
||||
EndGlobal
|
||||
173
C#/api/SpeechTest/SpeechTest.cs
Normal file
173
C#/api/SpeechTest/SpeechTest.cs
Normal file
@@ -0,0 +1,173 @@
|
||||
/*
|
||||
* Copyright (c) 2017 Google Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
|
||||
* use this file except in compliance with the License. You may obtain a copy of
|
||||
* the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using Xunit;
|
||||
|
||||
namespace GoogleCloudSamples
|
||||
{
|
||||
/// <summary>
/// Verifies that the QuickStart sample runs to completion and prints a
/// transcription of the bundled audio clip.
/// </summary>
public class QuickStartTest
{
    // Invokes the sample's Main() in-process and captures its console output.
    readonly CommandLineRunner _quickStart = new CommandLineRunner()
    {
        VoidMain = QuickStart.Main,
        Command = "QuickStart"
    };

    [Fact]
    public void TestRun()
    {
        var output = _quickStart.Run();

        // The sample must exit cleanly and transcribe the well-known phrase.
        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
    }
}
|
||||
|
||||
/// <summary>
/// Shared test logic for the Recognize sample.  Derived classes decide
/// whether the audio is examined from the local disk or first uploaded to
/// Google Cloud Storage.
/// </summary>
public abstract class CommonRecognizeTests
{
    // Invokes Recognize.Main() in-process and captures its console output.
    protected readonly CommandLineRunner _recognize = new CommandLineRunner()
    {
        Main = Recognize.Main,
        Command = "Recognize"
    };

    // Sample audio clips shipped alongside the tests.
    protected string _audioRawPath = Path.Combine("resources", "audio.raw");
    protected string _audioFlacPath = Path.Combine("resources", "audio.flac");

    /// <summary>
    /// Derived classes implement this function to examine the file
    /// locally, or first upload it to Google Cloud Storage and then
    /// examine it.
    /// </summary>
    /// <param name="args">Command line arguments to Main().</param>
    protected abstract ConsoleOutput Run(params string[] args);

    [Fact]
    public void TestSync()
    {
        var output = Run("sync", _audioRawPath);

        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
    }
}
|
||||
|
||||
/// <summary>
/// Runs the Recognize sample against audio files on the local disk.
/// </summary>
public class LocalRecognizeTests : CommonRecognizeTests
{
    // Local files need no preparation; just forward the arguments.
    protected override ConsoleOutput Run(params string[] args)
    {
        return _recognize.Run(args);
    }

    [Fact(Skip = "https://github.com/GoogleCloudPlatform/google-cloud-dotnet/issues/723")]
    public void TestStreaming()
    {
        var output = _recognize.Run("stream", _audioRawPath);

        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
    }

    [Fact(Skip = "Unreliable on automated test machines.")]
    public void TestListen()
    {
        var output = _recognize.Run("listen", "3");

        // A machine without a microphone exits non-zero with an error message
        // instead of prompting the user to speak.
        if (output.ExitCode == 0)
        {
            Assert.Contains("Speak now.", output.Stdout);
        }
        else
        {
            Assert.Contains("No microphone.", output.Stdout);
        }
    }

    [Fact]
    public void TestFlac()
    {
        // Explicitly select the Flac encoding for the .flac sample.
        var output = _recognize.Run("rec", "-e", "Flac", _audioFlacPath);

        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
    }

    [Fact]
    public void TestSyncWithCredentials()
    {
        // Passes the credentials file path as an explicit argument rather
        // than relying on the sample picking up the environment variable.
        var output = Run("sync-creds", _audioRawPath,
            Environment.GetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS"));

        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
    }

    [Fact]
    public void TestWithContext()
    {
        // Speech-context phrases are supplied on stdin, one per line,
        // terminated by a blank line.
        string stdin = "Good day!\nBye bye.\n\n";
        var output = _recognize.RunWithStdIn(stdin, "with-context", _audioRawPath);

        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
    }

    [Fact]
    public void TestSyncWords()
    {
        // -w requests per-word timestamps in the output.
        var output = Run("sync", "-w", _audioRawPath);

        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
        Assert.Contains("WordStartTime:", output.Stdout);
    }
}
|
||||
|
||||
/// <summary>
/// Runs the Recognize sample against audio files uploaded to a temporary
/// Google Cloud Storage bucket supplied by <see cref="RandomBucketFixture"/>.
/// </summary>
public class CloudStorageRecognizeTests : CommonRecognizeTests, IClassFixture<RandomBucketFixture>, System.IDisposable
{
    // Bucket created by the fixture for the lifetime of the test class.
    readonly string _bucketName;
    // Records uploaded objects so they can be cleaned up in Dispose().
    readonly BucketCollector _bucketCollector;

    public CloudStorageRecognizeTests(RandomBucketFixture bucketFixture)
    {
        _bucketName = bucketFixture.BucketName;
        _bucketCollector = new BucketCollector(_bucketName);
    }

    /// <summary>
    /// Copies a local file into the test bucket and returns its gs:// URI.
    /// </summary>
    string Upload(string localPath)
    {
        string objectName = Path.GetFileName(localPath);
        _bucketCollector.CopyToBucket(localPath, objectName);
        return $"gs://{_bucketName}/{objectName}";
    }

    [Fact]
    public void TestAsync()
    {
        var output = Run("async", Upload(_audioRawPath));

        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
        Assert.Contains("how", output.Stdout);
    }

    [Fact]
    public void TestAsyncWords()
    {
        // -w requests per-word timestamps in the output.
        var output = Run("async", "-w", Upload(_audioRawPath));

        Assert.Equal(0, output.ExitCode);
        Assert.Contains("Brooklyn", output.Stdout);
        Assert.Contains("WordStartTime:", output.Stdout);
    }

    public void Dispose()
    {
        // BucketCollector implements IDisposable explicitly, so the cast
        // is required to reach Dispose().
        ((IDisposable)_bucketCollector).Dispose();
    }

    // Upload happens in the individual tests, so forwarding is enough here.
    protected override ConsoleOutput Run(params string[] args) =>
        _recognize.Run(args);
}
|
||||
}
|
||||
36
C#/api/SpeechTest/SpeechTest.csproj
Normal file
36
C#/api/SpeechTest/SpeechTest.csproj
Normal file
@@ -0,0 +1,36 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Library</OutputType>
    <TargetFramework>netcoreapp2.0</TargetFramework>
  </PropertyGroup>
  <ItemGroup>
    <PackageReference Include="Google.Cloud.Speech.V1" Version="1.0.0" />
    <PackageReference Include="Microsoft.NET.Test.Sdk" Version="15.3.0" />
    <PackageReference Include="xunit" Version="2.3.0" />
    <PackageReference Include="xunit.runner.visualstudio" Version="2.3.0" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\Recognize\Recognize.csproj" />
    <ProjectReference Include="..\QuickStart\QuickStart.csproj" />
    <ProjectReference Include="..\..\..\testutil\testutil.csproj" />
  </ItemGroup>
  <!-- Sample audio fixtures are linked into the project and copied next to
       the test binaries so the tests can load them from the "resources"
       directory at run time. -->
  <ItemGroup>
    <Content Include="..\resources\audio.flac">
      <Link>resources\audio.flac</Link>
      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
    </Content>
    <Content Include="..\resources\audio.raw">
      <Link>resources\audio.raw</Link>
      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
    </Content>
    <Content Include="..\resources\audio2.raw">
      <Link>resources\audio2.raw</Link>
      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
    </Content>
    <Content Include="..\resources\quit.raw">
      <Link>resources\quit.raw</Link>
      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
    </Content>
  </ItemGroup>

</Project>
|
||||
16
C#/api/SpeechTest/runTest.ps1
Normal file
16
C#/api/SpeechTest/runTest.ps1
Normal file
@@ -0,0 +1,16 @@
|
||||
# Copyright(c) 2017 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
|
||||
# use this file except in compliance with the License. You may obtain a copy of
|
||||
# the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations under
|
||||
# the License.
|
||||
|
||||
# Restore NuGet packages, then build and run the xunit tests.
dotnet restore
dotnet test
|
||||
BIN
C#/api/cloud-shell-import-file.png
Normal file
BIN
C#/api/cloud-shell-import-file.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 33 KiB |
BIN
C#/api/resources/audio.flac
Normal file
BIN
C#/api/resources/audio.flac
Normal file
Binary file not shown.
BIN
C#/api/resources/audio.raw
Normal file
BIN
C#/api/resources/audio.raw
Normal file
Binary file not shown.
BIN
C#/api/resources/audio2.raw
Normal file
BIN
C#/api/resources/audio2.raw
Normal file
Binary file not shown.
BIN
C#/api/resources/quit.raw
Normal file
BIN
C#/api/resources/quit.raw
Normal file
Binary file not shown.
Reference in New Issue
Block a user