This article shows how a web spider can collect all of the links in a web page. The idea is simple: use the System.Net.WebClient class to download the remote page, then run a URL regular expression over the HTML to pick out the links. The code is as follows:
using System;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;

namespace HttpGet
{
    class Class1
    {
        [STAThread]
        static void Main(string[] args)
        {
            // Download the raw page bytes and decode them (the page is assumed to be UTF-8).
            WebClient client = new WebClient();
            byte[] page = client.DownloadData("http://news.163.com");
            string content = Encoding.UTF8.GetString(page);

            // Match href="..." or href='...' attributes whose value is an absolute
            // http:// URL or a relative path, optionally followed by a query string.
            string regex = @"href=[""'](http://|\./|/)?\w+(\.\w+)*(/\w+(\.\w+)?)*(/|\?\w*=\w*(&\w*=\w*)*)?[""']";
            Regex re = new Regex(regex);
            MatchCollection matches = re.Matches(content);

            // Print every matched link, one per line.
            foreach (Match match in matches)
            {
                Console.WriteLine(match.Value);
            }
        }
    }
}
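Each match above still carries the surrounding href="..." text, and relative paths such as ./ or / cannot be fed back to the spider directly. Below is a minimal sketch, written as a separate little program, of one way to post-process the links. The class name LinkNormalizer, the simplified pattern, and the capture-group name url are introduced here purely for illustration and are not part of the original code, and the page is again assumed to be UTF-8 encoded.

using System;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;

namespace HttpGet
{
    class LinkNormalizer
    {
        static void Main(string[] args)
        {
            // Same download step as above; UTF-8 is an assumption, not something WebClient verifies.
            Uri baseUri = new Uri("http://news.163.com");
            WebClient client = new WebClient();
            string content = Encoding.UTF8.GetString(client.DownloadData(baseUri));

            // Simplified pattern for illustration: capture whatever sits between the
            // quotes of an href attribute into a group named "url".
            string pattern = @"href=[""'](?<url>[^""']+)[""']";

            foreach (Match match in Regex.Matches(content, pattern))
            {
                string raw = match.Groups["url"].Value;

                // Resolve relative paths against the page address; absolute
                // http:// links pass through unchanged.
                Uri absolute;
                if (Uri.TryCreate(baseUri, raw, out absolute))
                {
                    Console.WriteLine(absolute);
                }
            }
        }
    }
}

Uri.TryCreate does the relative-to-absolute conversion, so links like ./photo/index.html, /sports/ and http://money.163.com/ all come out as full URLs that the spider can queue for its next round of downloads.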