1 Viewing the oiv and oev commands
[root@node1 ~]# hdfs|grep o.v
oev apply the offline edits viewer to an edits file
oiv apply the offline fsimage viewer to an fsimage
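The oev command is used in the same way, but against an edits file rather than an fsimage. A minimal sketch (the edits segment name below is a placeholder; pick a real one from your dfs/name/current directory):
# Convert an edits segment to XML with the Offline Edits Viewer
[root@node1 current]# hdfs oev -p XML -i edits_0000000000000000048-0000000000000000049 -o /opt/hadoop-3.1.3/edits49.xml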
2 Viewing the oiv command help
[root@node1 ~]# hdfs oiv --help
Usage: bin/hdfs oiv [OPTIONS] -i INPUTFILE -o OUTPUTFILE
Offline Image Viewer
View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,
saving the results in OUTPUTFILE.
-i,--inputFile <arg>   FSImage or XML file to process.
Optional command line arguments:
-o,--outputFile <arg>  Name of output file. If the specified
                       file exists, it will be overwritten.
                       (output to stdout by default)
                       If the input file was an XML file, we
                       will also create an <outputFile>.md5 file.
-p,--processor <arg>   Select which type of processor to apply
                       against image file. (XML|FileDistribution|
                       ReverseXML|Web|Delimited)
                       The default is Web.
3 Basic syntax
hdfs oiv -p <processor type> -i <fsimage file> -o <full path of the converted output file>
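Besides the XML processor used in the case below, the FileDistribution processor listed in the help above summarizes how file sizes are distributed in the image. A minimal sketch (the output path is illustrative):
# Produce a file-size distribution report instead of an XML dump
[root@node1 current]# hdfs oiv -p FileDistribution -i fsimage_0000000000000000049 -o /opt/hadoop-3.1.3/fsimage49.dist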
4 Hands-on example
[root@node1 current]# pwd
/var/itbaizhan/hadoop/full/dfs/name/current
[root@node1 current]# ll|grep fsimage
-rw-r--r-- 1 root root 722 Oct 9 13:49 fsimage_0000000000000000047
-rw-r--r-- 1 root root 62 Oct 9 13:49 fsimage_0000000000000000047.md5
-rw-r--r-- 1 root root 722 Oct 9 14:49 fsimage_0000000000000000049
-rw-r--r-- 1 root root 62 Oct 9 14:49 fsimage_0000000000000000049.md5
[root@node1 current]# hdfs oiv -p XML -i fsimage_0000000000000000049 -o /opt/hadoop-3.1.3/fsimage49.xml
2021-10-09 15:20:24,428 INFO offlineImageViewer.FSImageHandler: Loading 3 strings
[root@node1 current]# vim /opt/hadoop-3.1.3/fsimage49.xml
# Format the XML file in vim: Ctrl+v (select the whole file) -> !xmllint -format - -> delete the generated <?xml ...?> declaration line -> save and quit
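If xmllint is available on the node, the file can also be formatted directly from the shell instead of inside vim (a minimal sketch; the output file name is illustrative):
# Pretty-print the exported XML into a new file
[root@node1 current]# xmllint --format /opt/hadoop-3.1.3/fsimage49.xml > /opt/hadoop-3.1.3/fsimage49_pretty.xml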
[root@node1 current]# cat /opt/hadoop-3.1.3/fsimage49.xml
Part of the output is shown below:
<?xml version="1.0"?>
<fsimage>
  <INodeSection>
    <lastInodeId>16392</lastInodeId>
    <numInodes>5</numInodes>
    <inode>
      <id>16385</id>
      <type>DIRECTORY</type>
      <name/>
      <mtime>1633749749273</mtime>
      <permission>root:supergroup:0755</permission>
      <nsquota>9223372036854775807</nsquota>
      <dsquota>-1</dsquota>
    </inode>
    <inode>
      <id>16386</id>
      <type>DIRECTORY</type>
      <name>user</name>
      <mtime>1633748876034</mtime>
      <permission>root:supergroup:0755</permission>
      <nsquota>-1</nsquota>
      <dsquota>-1</dsquota>
    </inode>
    <inode>
      <id>16387</id>
      <type>DIRECTORY</type>
      <name>root</name>
      <mtime>1633749176246</mtime>
      <permission>root:supergroup:0755</permission>
      <nsquota>-1</nsquota>
      <dsquota>-1</dsquota>
    </inode>
    <inode>
      <id>16388</id>
      <type>FILE</type>
      <name>hadoop-3.1.3.tar.gz</name>
      <replication>2</replication>
      <mtime>1633749066652</mtime>
      <atime>1633749010986</atime>
      <preferredBlockSize>134217728</preferredBlockSize>
      <permission>root:supergroup:0644</permission>
      <blocks>
        <block>
          <id>1073741825</id>
          <genstamp>1001</genstamp>
          <numBytes>134217728</numBytes>
        </block>
        <block>
          <id>1073741826</id>
          <genstamp>1002</genstamp>
          <numBytes>134217728</numBytes>
        </block>
        <block>
          <id>1073741827</id>
          <genstamp>1003</genstamp>
          <numBytes>69640404</numBytes>
        </block>
      </blocks>
      <storagePolicyId>0</storagePolicyId>
    </inode>
    <inode>
      <id>16389</id>
      <type>FILE</type>
      <name>test.txt</name>
      <replication>2</replication>
      <mtime>1633749176236</mtime>
      <atime>1633749175593</atime>
      <preferredBlockSize>134217728</preferredBlockSize>
      <permission>root:supergroup:0644</permission>
      <blocks>
        <block>
          <id>1073741828</id>
          <genstamp>1004</genstamp>
          <numBytes>38</numBytes>
        </block>
      </blocks>
      <storagePolicyId>0</storagePolicyId>
    </inode>
  </INodeSection>
  ......
</fsimage>
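Once exported, the XML can also be queried from the command line. A minimal sketch, assuming your xmllint build supports --xpath (the path matches the file generated above); it should print the <name> elements of the two files, hadoop-3.1.3.tar.gz and test.txt:
# Print the <name> elements of all FILE inodes recorded in the fsimage
[root@node1 current]# xmllint --xpath '//inode[type="FILE"]/name' /opt/hadoop-3.1.3/fsimage49.xml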
Question: notice that the fsimage does not record which DataNodes each block is stored on. Why?
Because after the cluster starts, every DataNode is required to report its block information to the NameNode, and to report it again at regular intervals; the NameNode rebuilds the block-to-DataNode mapping in memory from these block reports instead of persisting it in the fsimage.
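That in-memory mapping can be inspected with fsck. A hedged sketch (the file path and the DataNode host:ipc_port are illustrative; adjust them to your cluster):
# Show the blocks of a file and the DataNodes currently holding each replica
[root@node1 ~]# hdfs fsck /user/root/test.txt -files -blocks -locations
# Ask a DataNode to send a block report to the NameNode immediately
[root@node1 ~]# hdfs dfsadmin -triggerBlockReport node2:9867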