public class DataNode extends org.apache.hadoop.conf.Configured implements org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol, org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol, org.apache.hadoop.hdfs.protocol.FSConstants, Runnable, org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
| Modifier and Type | Field and Description |
|---|---|
org.apache.hadoop.hdfs.server.datanode.DataBlockScanner |
blockScanner |
org.apache.hadoop.util.Daemon |
blockScannerThread |
org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface |
data |
static String |
DATA_DIR_KEY |
static String |
DATA_DIR_PERMISSION_KEY |
static String |
DN_CLIENTTRACE_FORMAT |
org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration |
dnRegistration |
static String |
EMPTY_DEL_HINT |
org.apache.hadoop.ipc.Server |
ipcServer |
static org.apache.commons.logging.Log |
LOG |
org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol |
namenode |
static int |
PKT_HEADER_LEN
Header size for a packet
|
versionID

Fields inherited from interface org.apache.hadoop.hdfs.protocol.FSConstants:
BLOCKREPORT_INITIAL_DELAY, BLOCKREPORT_INTERVAL, BUFFER_SIZE, DEFAULT_BLOCK_SIZE, DEFAULT_DATA_SOCKET_SIZE, HEARTBEAT_INTERVAL, LAYOUT_VERSION, LEASE_HARDLIMIT_PERIOD, LEASE_RECOVER_PERIOD, LEASE_SOFTLIMIT_PERIOD, MAX_PATH_DEPTH, MAX_PATH_LENGTH, MIN_BLOCKS_FOR_WRITE, QUOTA_DONT_SET, QUOTA_RESET, SIZE_OF_INTEGER, SMALL_BUFFER_SIZE

| Modifier and Type | Method and Description |
|---|---|
protected void |
checkDiskError()
Check if there is a disk failure and if so, handle the error
|
protected void |
checkDiskError(Exception e)
Check if there is no space in disk
|
static DataNode |
createDataNode(String[] args,
org.apache.hadoop.conf.Configuration conf)
Instantiate & Start a single datanode daemon and wait for it to finish.
|
static DataNode |
createDataNode(String[] args,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources)
Instantiate & Start a single datanode daemon and wait for it to finish.
|
static org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol |
createInterDataNodeProtocolProxy(org.apache.hadoop.hdfs.protocol.DatanodeInfo info,
org.apache.hadoop.conf.Configuration conf,
int socketTimeout,
boolean connectToDnViaHostname) |
static InetSocketAddress |
createSocketAddr(String target)
Deprecated.
|
Long |
getBalancerBandwidth()
Get current value of the max balancer bandwidth in bytes per second.
|
org.apache.hadoop.hdfs.protocol.Block |
getBlockInfo(org.apache.hadoop.hdfs.protocol.Block block) |
org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo |
getBlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.Block block,
org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> token) |
org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo |
getBlockMetaDataInfo(org.apache.hadoop.hdfs.protocol.Block block) |
static DataNode |
getDataNode()
Return the DataNode object
|
org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface |
getFSDataset()
This method is used for testing.
|
String |
getHostName() |
String |
getHttpPort() |
static InetSocketAddress |
getInfoAddr(org.apache.hadoop.conf.Configuration conf)
Determine the http server's effective addr
|
String |
getNamenode()
Return the namenode's identifier
|
InetSocketAddress |
getNameNodeAddr() |
String |
getNamenodeAddress() |
long |
getProtocolVersion(String protocol,
long clientVersion) |
String |
getRpcPort() |
InetSocketAddress |
getSelfAddr() |
static InetSocketAddress |
getStreamingAddr(org.apache.hadoop.conf.Configuration conf) |
String |
getVersion() |
String |
getVolumeInfo()
Returned information is a JSON representation of a map with
volume name as the key and value is a map of volume attribute
keys to its values
|
int |
getXceiverCount()
Number of concurrent xceivers per node.
|
static DataNode |
instantiateDataNode(String[] args,
org.apache.hadoop.conf.Configuration conf)
Instantiate a single datanode object.
|
static DataNode |
instantiateDataNode(String[] args,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources)
Instantiate a single datanode object.
|
static void |
main(String[] args) |
static DataNode |
makeInstance(String[] dataDirs,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources)
Make an instance of DataNode after ensuring that at least one of the
given data directories (and their parent directories, if necessary)
can be created.
|
protected Socket |
newSocket()
Creates either NIO or regular depending on socketWriteTimeout.
|
protected void |
notifyNamenodeReceivedBlock(org.apache.hadoop.hdfs.protocol.Block block,
String delHint) |
void |
offerService()
Main loop for the DataNode.
|
org.apache.hadoop.hdfs.protocol.LocatedBlock |
recoverBlock(org.apache.hadoop.hdfs.protocol.Block block,
boolean keepLength,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] targets) |
org.apache.hadoop.util.Daemon |
recoverBlocks(org.apache.hadoop.hdfs.protocol.Block[] blocks,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[][] targets) |
void |
run()
No matter what kind of exception we get, keep retrying to offerService().
|
static void |
runDatanodeDaemon(DataNode dn)
Start a single datanode daemon and wait for it to finish.
|
void |
scheduleBlockReport(long delay)
This methods arranges for the data node to send the block report at the next heartbeat.
|
static void |
secureMain(String[] args,
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources) |
static void |
setNewStorageID(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration dnReg) |
void |
shutdown()
Shut down this instance of the datanode.
|
org.apache.hadoop.hdfs.server.protocol.BlockRecoveryInfo |
startBlockRecovery(org.apache.hadoop.hdfs.protocol.Block block) |
String |
toString() |
void |
unRegisterMXBean() |
void |
updateBlock(org.apache.hadoop.hdfs.protocol.Block oldblock,
org.apache.hadoop.hdfs.protocol.Block newblock,
boolean finalize) |
public static final org.apache.commons.logging.Log LOG
public static final String DN_CLIENTTRACE_FORMAT
public org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol namenode
public org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface data
public org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration dnRegistration
public static final String EMPTY_DEL_HINT
public org.apache.hadoop.hdfs.server.datanode.DataBlockScanner blockScanner
public org.apache.hadoop.util.Daemon blockScannerThread
public static final String DATA_DIR_KEY
public static final String DATA_DIR_PERMISSION_KEY
public org.apache.hadoop.ipc.Server ipcServer
public static final int PKT_HEADER_LEN
@Deprecated public static InetSocketAddress createSocketAddr(String target) throws IOException
Deprecated. Use NetUtils.createSocketAddr(String) instead.
Throws: IOException

public void unRegisterMXBean()
public static InetSocketAddress getInfoAddr(org.apache.hadoop.conf.Configuration conf)
protected Socket newSocket() throws IOException
Throws: IOException

public static DataNode getDataNode()
public static org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol createInterDataNodeProtocolProxy(org.apache.hadoop.hdfs.protocol.DatanodeInfo info,
org.apache.hadoop.conf.Configuration conf,
int socketTimeout,
boolean connectToDnViaHostname)
throws IOException
Throws: IOException

public InetSocketAddress getNameNodeAddr()
public InetSocketAddress getSelfAddr()
public String getNamenode()
public static void setNewStorageID(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration dnReg)
public void shutdown()
protected void checkDiskError(Exception e) throws IOException
Parameters: e - the exception that caused this checkDiskError call
Throws: IOException

protected void checkDiskError()
public int getXceiverCount()
Specified by: getXceiverCount in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean

public void offerService()
throws Exception
Throws: Exception

protected void notifyNamenodeReceivedBlock(org.apache.hadoop.hdfs.protocol.Block block,
String delHint)
public void run()
public static void runDatanodeDaemon(DataNode dn) throws IOException
Throws: IOException

public static DataNode instantiateDataNode(String[] args, org.apache.hadoop.conf.Configuration conf) throws IOException
Instantiate a single datanode object. This must be run by invoking runDatanodeDaemon(DataNode) subsequently.
Throws: IOException

public static DataNode instantiateDataNode(String[] args, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources) throws IOException
Instantiate a single datanode object. This must be run by invoking runDatanodeDaemon(DataNode) subsequently.
Parameters: resources - Secure resources needed to run under Kerberos
Throws: IOException

public static DataNode createDataNode(String[] args, org.apache.hadoop.conf.Configuration conf) throws IOException
Throws: IOException

public static DataNode createDataNode(String[] args, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources) throws IOException
Throws: IOException

public static DataNode makeInstance(String[] dataDirs, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources) throws IOException
Parameters:
dataDirs - List of directories, where the new DataNode instance should keep its files.
conf - Configuration instance to use.
resources - Secure resources needed to run under Kerberos
Throws: IOException

public void scheduleBlockReport(long delay)
public org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface getFSDataset()
public static void secureMain(String[] args, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources)
public static void main(String[] args)
public org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo getBlockMetaDataInfo(org.apache.hadoop.hdfs.protocol.Block block)
throws IOException
Specified by: getBlockMetaDataInfo in interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol
Throws: IOException

public org.apache.hadoop.hdfs.server.protocol.BlockRecoveryInfo startBlockRecovery(org.apache.hadoop.hdfs.protocol.Block block)
throws IOException
Specified by: startBlockRecovery in interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol
Throws: IOException

public org.apache.hadoop.util.Daemon recoverBlocks(org.apache.hadoop.hdfs.protocol.Block[] blocks,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[][] targets)
public void updateBlock(org.apache.hadoop.hdfs.protocol.Block oldblock,
org.apache.hadoop.hdfs.protocol.Block newblock,
boolean finalize)
throws IOException
Specified by: updateBlock in interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol
Throws: IOException

public long getProtocolVersion(String protocol, long clientVersion) throws IOException
Specified by: getProtocolVersion in interface org.apache.hadoop.ipc.VersionedProtocol
Throws: IOException

public org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo getBlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.Block block,
org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> token)
throws IOException
Specified by: getBlockLocalPathInfo in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol
Throws: IOException

public org.apache.hadoop.hdfs.protocol.LocatedBlock recoverBlock(org.apache.hadoop.hdfs.protocol.Block block,
boolean keepLength,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] targets)
throws IOException
Specified by: recoverBlock in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol
Throws: IOException

public org.apache.hadoop.hdfs.protocol.Block getBlockInfo(org.apache.hadoop.hdfs.protocol.Block block)
throws IOException
Specified by: getBlockInfo in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol
Throws: IOException

public static InetSocketAddress getStreamingAddr(org.apache.hadoop.conf.Configuration conf)
public String getHostName()
Specified by: getHostName in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean

public String getVersion()
Specified by: getVersion in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean

public String getRpcPort()
Specified by: getRpcPort in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean

public String getHttpPort()
Specified by: getHttpPort in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean

public String getNamenodeAddress()
Specified by: getNamenodeAddress in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean

public String getVolumeInfo()
Specified by: getVolumeInfo in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean

public Long getBalancerBandwidth()
Copyright © 2011–2014 Red Hat. All rights reserved.