這篇文章主要介紹了c#基于udp如何實現p2p語音聊天工具,具有一定借鑒價值,感興趣的朋友可以參考下,希望大家閱讀完這篇文章之后大有收獲,下面讓小編帶著大家一起了解一下。
之前發過一篇文章http://www.php.cn/ 已經實現過了UDP的分包發送數據的功能,而這篇文章主要是一個應用,使用udp傳送語音和文本等信息。在這個系統中沒有服務端和客戶端,相互通訊都是直接相互聯系的。能夠很好的實現效果。
要想發送語音信息,首先得獲取語音,這里有幾種方法,一種是使用DirectX的DirectXsound來錄音,我為了簡便使用一個開源的插件NAudio來實現語音錄取。 在項目中引用NAudio.dll
//------------------ Recording-related fields -----------------------------
private IWaveIn waveIn;   // NAudio capture device; created lazily by CreateWaveInDevice()
private WaveFileWriter writer;   // WAV file sink for the recording in progress; null when idle
/// <summary>
/// Populates comboBox1 with the active audio capture endpoints so the user
/// can pick a microphone. Shown text is the device's FriendlyName.
/// </summary>
private void LoadWasapiDevicesCombo()
{
    var enumerator = new MMDeviceEnumerator();
    var captureDevices = enumerator
        .EnumerateAudioEndPoints(DataFlow.Capture, DeviceState.Active)
        .ToList();
    comboBox1.DataSource = captureDevices;
    comboBox1.DisplayMember = "FriendlyName";
}
/// <summary>
/// Creates the capture device (8 kHz, mono — adequate for voice) and
/// subscribes to its data/stop events.
/// </summary>
private void CreateWaveInDevice()
{
    var device = new WaveIn { WaveFormat = new WaveFormat(8000, 1) };
    device.DataAvailable += OnDataAvailable;
    device.RecordingStopped += OnRecordingStopped;
    waveIn = device;
}
/// <summary>
/// Handles captured audio buffers: marshals to the UI thread, appends the
/// bytes to the WAV file and enforces the 10-second recording cap.
/// </summary>
void OnDataAvailable(object sender, WaveInEventArgs e)
{
    if (this.InvokeRequired)
    {
        // DataAvailable fires on the capture thread; re-dispatch to the UI thread.
        this.BeginInvoke(new EventHandler<WaveInEventArgs>(OnDataAvailable), sender, e);
        return;
    }

    // A callback queued via BeginInvoke can still arrive after StopRecord()
    // has led to FinalizeWaveFile() disposing the writer; without this guard
    // that late buffer would throw a NullReferenceException.
    if (writer == null)
    {
        return;
    }

    writer.Write(e.Buffer, 0, e.BytesRecorded);
    int secondsRecorded = (int)(writer.Length / writer.WaveFormat.AverageBytesPerSecond);
    if (secondsRecorded >= 10) // maximum clip length: 10 s
    {
        StopRecord();
    }
    else
    {
        l_sound.Text = secondsRecorded + " s";
    }
}
/// <summary>
/// Finalizes the WAV file once the device reports that recording has fully
/// stopped. Always runs the finalization on the UI thread.
/// </summary>
void OnRecordingStopped(object sender, StoppedEventArgs e)
{
    if (InvokeRequired)
    {
        // Raised on the capture thread; bounce back to the UI thread.
        BeginInvoke(new EventHandler<StoppedEventArgs>(OnRecordingStopped), sender, e);
        return;
    }

    FinalizeWaveFile();
}
/// <summary>
/// Requests the capture device to stop and switches the buttons back to the
/// "recorded" state (record/send/play enabled, stop disabled).
/// </summary>
void StopRecord()
{
    AllChangeBtn(btn_luyin, true);
    AllChangeBtn(btn_stop, false);
    AllChangeBtn(btn_sendsound, true);
    AllChangeBtn(btn_play, true);

    // StopRecording is asynchronous; the file is closed later in
    // OnRecordingStopped when the device confirms it has stopped.
    if (waveIn != null)
    {
        waveIn.StopRecording();
    }
}
/// <summary>Releases the capture device and finalizes any open WAV file.</summary>
private void Cleanup()
{
    var device = waveIn;
    waveIn = null;
    if (device != null)
    {
        device.Dispose();
    }

    FinalizeWaveFile();
}
/// <summary>
/// Closes the WAV writer (which flushes the RIFF header) and clears the field.
/// Safe to call when no recording is open.
/// </summary>
private void FinalizeWaveFile()
{
    var pending = writer;
    writer = null;
    if (pending != null)
    {
        pending.Dispose();
    }
}
// Start recording: creates the device on first use, removes any previous
// take and begins writing a fresh WAV file.
private void btn_luyin_Click(object sender, EventArgs e)
{
    btn_stop.Enabled = true;
    btn_luyin.Enabled = false;

    if (waveIn == null)
    {
        CreateWaveInDevice();
    }

    // Overwrite any previous recording.
    if (File.Exists(soundfile))
    {
        File.Delete(soundfile);
    }

    writer = new WaveFileWriter(soundfile, waveIn.WaveFormat);
    waveIn.StartRecording();
}
上面的代碼實現了錄音,并且寫入文件p2psound_A.wav

獲取到語音后我們要把語音發送出去
當我們錄好音后點擊發送,這部分相關代碼是
MsgTranslator tran = null; // NetFrame UDP message translator; wired up in the constructor
public Form1()
{
    InitializeComponent();
    LoadWasapiDevicesCombo(); // show the available audio capture devices

    // Bring up the UDP transport on port 7777 and subscribe to its events.
    Config cfg = SeiClient.GetDefaultConfig();
    cfg.Port = 7777;
    var udp = new UDPThread(cfg);
    tran = new MsgTranslator(udp, cfg);
    tran.MessageReceived += tran_MessageReceived;
    tran.Debuged += tran_Debuged;
}
/// <summary>
/// Sends the recorded voice clip as a binary UDP message to the peer whose
/// IP and port are entered in the form.
/// </summary>
private void btn_sendsound_Click(object sender, EventArgs e)
{
    if (t_ip.Text == "")
    {
        MessageBox.Show("請輸入ip");
        return;
    }
    if (t_port.Text == "")
    {
        MessageBox.Show("請輸入端口號");
        return;
    }

    // Validate instead of Parse: the original int.Parse/IPAddress.Parse threw
    // an unhandled exception (crashing the UI thread) on malformed input.
    IPAddress ip;
    if (!IPAddress.TryParse(t_ip.Text, out ip))
    {
        MessageBox.Show("ip格式不正確");
        return;
    }
    int port;
    if (!int.TryParse(t_port.Text, out port) || port < 1 || port > 65535)
    {
        MessageBox.Show("端口號格式不正確");
        return;
    }

    string nick = t_nick.Text;
    string msg = "語音消息";
    IPEndPoint remote = new IPEndPoint(ip, port);
    Msg m = new Msg(remote, "zz", nick, Commands.SendMsg, msg, "Come From A");
    m.IsRequireReceive = true;                      // ask the peer to acknowledge receipt
    m.ExtendMessageBytes = FileContent(soundfile);  // raw WAV bytes as the binary payload
    m.PackageNo = Msg.GetRandomNumber();
    m.Type = Consts.MESSAGE_BINARY;
    tran.Send(m);
}
/// <summary>
/// Reads a file fully into a byte array.
/// </summary>
/// <param name="fileName">Path of the file to read.</param>
/// <returns>The file's bytes, or null if the file cannot be read.</returns>
private byte[] FileContent(string fileName)
{
    try
    {
        // The original implementation ignored Stream.Read's return value and
        // could silently hand back a partially-filled buffer, and its
        // FileStream constructor sat OUTSIDE the try so open failures escaped
        // the catch. File.ReadAllBytes reads the whole file, always closes
        // the stream, and keeps every failure inside this try.
        return File.ReadAllBytes(fileName);
    }
    catch (Exception)
    {
        // Preserve the original contract: null on any I/O error.
        return null;
    }
}
如此一來我們就把產生的語音文件發送出去了
其實語音的接收和文本消息的接收沒有什么不同,只不過語音發送的時候是以二進制發送的,因此我們在收到語音后 就應該寫入到一個文件里面去,接收完成后,播放這段語音就行了。
下面這段代碼主要是把收到的數據保存到文件中去,這個函數是我的NetFrame里收到消息時所觸發的事件,在文章前面提過的那篇文章里
/// <summary>
/// Handles a message arriving from the NetFrame UDP layer: binary messages
/// are voice clips and are written to recive_soundfile; everything else is
/// displayed as a text message.
/// </summary>
void tran_MessageReceived(object sender, MessageEventArgs e)
{
    Msg msg = e.msg;
    if (msg.Type == Consts.MESSAGE_BINARY)
    {
        string m = msg.Type + "->" + msg.UserName + "發來二進制消息!";
        AddServerMessage(m);

        // File.WriteAllBytes creates-or-overwrites and always closes the
        // stream. The original opened a FileStream without using/finally
        // (leaking the handle if Write threw) and did a redundant
        // Exists/Delete before FileMode.Create, which overwrites anyway.
        File.WriteAllBytes(recive_soundfile, msg.ExtendMessageBytes);

        //play_sound(recive_soundfile);
        ChangeBtn(true);
    }
    else
    {
        string m = msg.Type + "->" + msg.UserName + "說:" + msg.NormalMsg;
        AddServerMessage(m);
    }
}
收到語音消息后,我們要進行播放,播放時仍然用剛才那個插件播放
//-------- Playback section ----------
private IWavePlayer wavePlayer;   // output device used to play WAV clips; recreated per play_sound call
private WaveStream reader;        // decoder stream for the file currently being played
/// <summary>
/// Plays an audio file, tearing down any previous player/reader pair first.
/// </summary>
/// <param name="filename">Path of the audio file to play.</param>
public void play_sound(string filename)
{
    // Dispose the previous playback session, if any.
    if (wavePlayer != null)
    {
        wavePlayer.Dispose();
        wavePlayer = null;
    }
    if (reader != null)
    {
        reader.Dispose();
        // The original kept a stale reference to the disposed reader; if the
        // constructor below threw, a later call would dispose it again.
        reader = null;
    }

    reader = new MediaFoundationReader(filename, new MediaFoundationReader.MediaFoundationReaderSettings() { SingleReaderObject = true });

    // wavePlayer is always null at this point (disposed and cleared above),
    // so the original `if (wavePlayer == null)` guard was dead code.
    wavePlayer = new WaveOut();
    wavePlayer.PlaybackStopped += WavePlayerOnPlaybackStopped;
    wavePlayer.Init(reader);
    wavePlayer.Play();
}
/// <summary>
/// Reports playback errors and re-enables the record button when playback
/// finishes (normally or with an error).
/// </summary>
private void WavePlayerOnPlaybackStopped(object sender, StoppedEventArgs stoppedEventArgs)
{
    var error = stoppedEventArgs.Exception;
    if (error != null)
    {
        MessageBox.Show(error.Message);
    }

    if (wavePlayer != null)
    {
        wavePlayer.Stop();
    }

    btn_luyin.Enabled = true;
}

/// <summary>Plays back the locally recorded clip.</summary>
private void btn_play_Click(object sender, EventArgs e)
{
    btn_luyin.Enabled = false;
    play_sound(soundfile);
}

在上面演示了接收和發送一段語音消息的界面
主要用到的技術就是UDP和NAudio的錄音和播放功能
感謝你能夠認真閱讀完這篇文章,希望小編分享的“c#基于udp如何實現p2p語音聊天工具”這篇文章對大家有幫助,同時也希望大家多多支持億速云,關注億速云行業資訊頻道,更多相關知識等著你來學習!
免責聲明:本站發布的內容(圖片、視頻和文字)以原創、轉載和分享為主,文章觀點不代表本網站立場,如果涉及侵權請聯系站長郵箱:is@yisu.com進行舉報,并提供相關證據,一經查實,將立刻刪除涉嫌侵權內容。