For details on installing HDFS, see the earlier post (CM 5.4 and CDH 5.4 installation: http://my.oschina.net/penngo/blog/517223).
This post only covers operating on the HDFS filesystem from Java. The required packages (including the HBase-related ones) are shown below.
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class HdfsTest {
    private FileSystem fileSystem = null;
    private Configuration config = null;

    public HdfsTest() {
    }

    public void connect() throws Exception {
        String uri = "hdfs://cdh1.localdomain:8020/";
        config = new Configuration();
        try {
            fileSystem = FileSystem.get(URI.create(uri), config);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
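    /**
     * Alternative connection sketch (not from the original post): instead of passing a
     * URI to FileSystem.get(), the default filesystem can be set on the Configuration
     * itself. The NameNode address below is the one used in this post and would need
     * to be changed for other clusters.
     */
    public void connectViaConfig() throws Exception {
        config = new Configuration();
        // fs.defaultFS replaces the deprecated fs.default.name key
        config.set("fs.defaultFS", "hdfs://cdh1.localdomain:8020/");
        fileSystem = FileSystem.get(config);
    }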
    /**
     * Create a directory.
     * @param dir
     * @throws Exception
     */
    public void createDir(String dir) throws Exception {
        Path path = new Path(dir);
        if (!fileSystem.exists(path)) {
            fileSystem.mkdirs(path);
            System.out.println("new dir \t" + config.get("fs.default.name")
                    + dir);
        } else {
            System.out.println("new dir exist \t"
                    + config.get("fs.default.name") + dir);
        }
    }
    /**
     * Upload a local file directly to HDFS.
     * @param localSrc
     * @param hdfsDst
     * @throws Exception
     */
    public void uploadFile(String localSrc, String hdfsDst) throws Exception {
        Path src = new Path(localSrc);
        Path dst = new Path(hdfsDst);
        fileSystem.copyFromLocalFile(src, dst);
        // list all the files in the destination directory
        FileStatus[] files = fileSystem.listStatus(dst);
        System.out.println("Upload to \t" + config.get("fs.default.name")
                + hdfsDst);
        for (FileStatus file : files) {
            System.out.println(file.getPath());
        }
    }
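    /**
     * Sketch of the reverse operation (not in the original post): copy a file from
     * HDFS back to the local filesystem using FileSystem.copyToLocalFile().
     */
    public void downloadFile(String hdfsSrc, String localDst) throws Exception {
        Path src = new Path(hdfsSrc);
        Path dst = new Path(localDst);
        fileSystem.copyToLocalFile(src, dst);
        System.out.println("Download to \t" + localDst);
    }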
    /**
     * Read a file and print its contents.
     * @param dst
     * @throws Exception
     */
    public void readFile(String dst) throws Exception {
        FSDataInputStream hdfsInStream = fileSystem.open(new Path(dst));
        byte[] ioBuffer = new byte[1024];
        int readLen = hdfsInStream.read(ioBuffer);
        StringBuilder sb = new StringBuilder();
        while (-1 != readLen) {
            sb.append(new String(ioBuffer, 0, readLen));
            readLen = hdfsInStream.read(ioBuffer);
        }
        hdfsInStream.close();
        System.out.println("readFromHdfs===" + sb.toString());
    }
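    /**
     * Reading sketch (added for illustration): stream the file straight to an
     * OutputStream with Hadoop's IOUtils instead of assembling a String in memory.
     * IOUtils here is org.apache.hadoop.io.IOUtils.
     */
    public void readFileToStream(String dst, java.io.OutputStream out) throws IOException {
        FSDataInputStream in = fileSystem.open(new Path(dst));
        try {
            // 4096-byte copy buffer; false = do not close the output stream
            org.apache.hadoop.io.IOUtils.copyBytes(in, out, 4096, false);
        } finally {
            in.close();
        }
    }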
    /**
     * Write content to a new file.
     * @param fileName
     * @param fileContent
     * @throws Exception
     */
    public void writeFile(String fileName, String fileContent) throws Exception {
        Path dst = new Path(fileName);
        byte[] bytes = fileContent.getBytes();
        FSDataOutputStream output = fileSystem.create(dst);
        output.write(bytes);
        output.flush();
        output.close();
        System.out.println("new file \t" + config.get("fs.default.name")
                + fileName);
    }
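    // Note (illustrative): fileSystem.create(dst) overwrites any existing file. If the
    // write should fail when the file already exists, the overwrite flag can be passed
    // explicitly, e.g. fileSystem.create(dst, false).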
    // public void appendFile(String fileName, String fileContent)
    // throws Exception {
    // // Appending requires this property to be set (typically in hdfs-site.xml):
    // // <property>
    // // <name>dfs.support.append</name>
    // // <value>true</value>
    // // </property>
    // Path dst = new Path(fileName);
    //
    // byte[] bytes = fileContent.getBytes();
    // FSDataOutputStream output = fileSystem.append(dst);
    // output.write(bytes);
    // output.flush();
    // output.close();
    // FileStatus fs = fileSystem.getFileStatus(dst);
    // System.out.println("appendFile after \t" + fs.getLen());
    // System.out.println("new file \t" + config.get("fs.default.name")
    // + fileName);
    // }
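    // Sketch (assumption, not from the original post): the append switch can also be set
    // programmatically on the client Configuration before calling FileSystem.get(), e.g.
    //   config.setBoolean("dfs.support.append", true);
    // On recent HDFS releases (including CDH 5.x) append is enabled by default and this
    // property is effectively ignored.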
    /**
     * List the contents of a directory.
     * @param dirName
     * @throws IOException
     */
    public void listFiles(String dirName) throws IOException {
        Path f = new Path(dirName);
        FileStatus[] status = fileSystem.listStatus(f);
        System.out.println(dirName + " contains the following files:");
        for (int i = 0; i < status.length; i++) {
            System.out.println(status[i].getPath().toString() + " "
                    + status[i].getLen());
        }
    }
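    /**
     * Recursive listing sketch (added for illustration): FileSystem.listFiles(path, true)
     * walks the whole subtree and returns the files it finds (directories themselves are
     * not returned).
     */
    public void listFilesRecursive(String dirName) throws IOException {
        org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.LocatedFileStatus> it =
                fileSystem.listFiles(new Path(dirName), true);
        while (it.hasNext()) {
            org.apache.hadoop.fs.LocatedFileStatus status = it.next();
            System.out.println(status.getPath() + " " + status.getLen());
        }
    }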
    // Check whether a file exists and delete it if it does.
    public void deleteFile(String fileName) throws IOException {
        Path f = new Path(fileName);
        boolean isExists = fileSystem.exists(f);
        if (isExists) { // if it exists, delete it (recursively)
            boolean isDel = fileSystem.delete(f, true);
            System.out.println(fileName + " delete? \t" + isDel);
        } else {
            System.out.println(fileName + " exist? \t" + isExists);
        }
    }

    public void close() throws Exception {
        fileSystem.close();
    }
    public static void main(String[] args) throws Exception {
        // Without this, the client may fail with
        // org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException):
        // Access denied for user Administrator. Superuser privilege is required
        System.setProperty("HADOOP_USER_NAME", "hdfs");
        HdfsTest hdfs = new HdfsTest();
        hdfs.connect();
        String dir = "/test2";
        // hdfs.createDir(dir);
        // hdfs.listFiles("/");
        String localSrc = "d:\\testp2.txt";
        hdfs.uploadFile(localSrc, dir);
        hdfs.writeFile(dir + "/myfile2.txt", "hello test!");
        hdfs.listFiles(dir);
        // hdfs.appendFile(dir + "/myfile.txt", " test 1000");
        hdfs.deleteFile(dir + "/myfile.txt");
        hdfs.listFiles(dir);
        hdfs.close();
    }
}
Source: oschina
Link: https://my.oschina.net/u/117179/blog/519371