12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873128741287512876128771287812879128801288112882128831288412885128861288712888128891289012891128921289312894128951289612897128981289912900129011290212903129041290512906129071290812909129101291112912129131291412915129161291712918129191292012921129221292312924129251292612927129281292912930129311293212933129341293512936129371293812939129401294112942129431294412945129461294712948129491295012951129521295312954129551295612957129581295912960129611296212963129641296512966129671296812969129701297112972129731297412975129761297712978129791298012981129821298312984129851298612987129881298912990129911299212993129941299512996129971299812999130001300113002130031300413005130061300713008130091301013011130121301313014130151301613017130181301913020130211
30221302313024130251302613027130281302913030130311303213033130341303513036130371303813039130401304113042130431304413045130461304713048130491305013051130521305313054130551305613057130581305913060130611306213063130641306513066130671306813069130701307113072130731307413075130761307713078130791308013081130821308313084130851308613087130881308913090130911309213093130941309513096130971309813099131001310113102131031310413105131061310713108131091311013111131121311313114131151311613117131181311913120131211312213123131241312513126131271312813129131301313113132131331313413135131361313713138131391314013141131421314313144131451314613147131481314913150131511315213153131541315513156131571315813159131601316113162131631316413165131661316713168131691317013171131721317313174131751317613177131781317913180131811318213183131841318513186131871318813189131901319113192131931319413195131961319713198131991320013201132021320313204132051320613207132081320913210132111321213213132141321513216132171321813219132201322113222132231322413225132261322713228132291323013231132321323313234132351323613237132381323913240132411324213243132441324513246132471324813249132501325113252132531325413255132561325713258132591326013261132621326313264132651326613267132681326913270132711327213273132741327513276132771327813279132801328113282132831328413285132861328713288132891329013291132921329313294132951329613297132981329913300133011330213303133041330513306133071330813309133101331113312133131331413315133161331713318133191332013321133221332313324133251332613327133281332913330133311333213333133341333513336133371333813339133401334113342133431334413345133461334713348133491335013351133521335313354133551335613357133581335913360133611336213363133641336513366133671336813369133701337113372133731337413375133761337713378133791338013381133821338313384133851338613387133881338913390133911339213393133941339513396133971339813399134001340113402134031340413405134061340713408134091341013411134121341313414134151341613417134181341913420134211
34221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211
38221382313824138251382613827138281382913830138311383213833138341383513836138371383813839138401384113842138431384413845138461384713848138491385013851138521385313854138551385613857138581385913860138611386213863138641386513866138671386813869138701387113872138731387413875138761387713878138791388013881138821388313884138851388613887138881388913890138911389213893138941389513896138971389813899139001390113902139031390413905139061390713908139091391013911139121391313914139151391613917139181391913920139211392213923139241392513926139271392813929139301393113932139331393413935139361393713938139391394013941139421394313944139451394613947139481394913950139511395213953139541395513956139571395813959139601396113962139631396413965139661396713968139691397013971139721397313974139751397613977139781397913980139811398213983139841398513986139871398813989139901399113992139931399413995139961399713998139991400014001140021400314004140051400614007140081400914010140111401214013140141401514016140171401814019140201402114022140231402414025140261402714028140291403014031140321403314034140351403614037140381403914040140411404214043140441404514046140471404814049140501405114052140531405414055140561405714058140591406014061140621406314064140651406614067140681406914070140711407214073140741407514076140771407814079140801408114082140831408414085140861408714088140891409014091140921409314094140951409614097140981409914100141011410214103141041410514106141071410814109141101411114112141131411414115141161411714118141191412014121141221412314124141251412614127141281412914130141311413214133141341413514136141371413814139141401414114142141431414414145141461414714148141491415014151141521415314154141551415614157141581415914160141611416214163141641416514166141671416814169141701417114172141731417414175141761417714178141791418014181141821418314184141851418614187141881418914190141911419214193141941419514196141971419814199142001420114202142031420414205142061420714208142091421014211142121421314214142151421614217142181421914220142211
42221422314224142251422614227142281422914230142311423214233142341423514236142371423814239142401424114242142431424414245142461424714248142491425014251142521425314254142551425614257142581425914260142611426214263142641426514266142671426814269142701427114272142731427414275142761427714278142791428014281142821428314284142851428614287142881428914290142911429214293142941429514296142971429814299143001430114302143031430414305143061430714308143091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611146121461314614146151461614617146181461914620146211
46221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867148681486914870148711487214873148741487514876148771487814879148801488114882148831488414885148861488714888148891489014891148921489314894148951489614897148981489914900149011490214903149041490514906149071490814909149101491114912149131491414915149161491714918149191492014921149221492314924149251492614927149281492914930149311493214933149341493514936149371493814939149401494114942149431494414945149461494714948149491495014951149521495314954149551495614957149581495914960149611496214963149641496514966149671496814969149701497114972149731497414975149761497714978149791498014981149821498314984149851498614987149881498914990149911499214993149941499514996149971499814999150001500115002150031500415005150061500715008150091501015011150121501315014150151501615017150181501915020150211
50221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211
54221542315424154251542615427154281542915430154311543215433154341543515436154371543815439154401544115442154431544415445154461544715448154491545015451154521545315454154551545615457154581545915460154611546215463154641546515466154671546815469154701547115472154731547415475154761547715478154791548015481154821548315484154851548615487154881548915490154911549215493154941549515496154971549815499155001550115502155031550415505155061550715508155091551015511155121551315514155151551615517155181551915520155211552215523155241552515526155271552815529155301553115532155331553415535155361553715538155391554015541155421554315544155451554615547155481554915550155511555215553155541555515556155571555815559155601556115562155631556415565155661556715568155691557015571155721557315574155751557615577155781557915580155811558215583155841558515586155871558815589155901559115592155931559415595155961559715598155991560015601156021560315604156051560615607156081560915610156111561215613156141561515616156171561815619156201562115622156231562415625156261562715628156291563015631156321563315634156351563615637156381563915640156411564215643156441564515646156471564815649156501565115652156531565415655156561565715658156591566015661156621566315664156651566615667156681566915670156711567215673156741567515676156771567815679156801568115682156831568415685156861568715688156891569015691156921569315694156951569615697156981569915700157011570215703157041570515706157071570815709157101571115712157131571415715157161571715718157191572015721157221572315724157251572615727157281572915730157311573215733157341573515736157371573815739157401574115742157431574415745157461574715748157491575015751157521575315754157551575615757157581575915760157611576215763157641576515766157671576815769157701577115772157731577415775157761577715778157791578015781157821578315784157851578615787157881578915790157911579215793157941579515796157971579815799158001580115802158031580415805158061580715808158091581015811158121581315814158151581615817158181581915820158211
58221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220 |
- <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
- <!-- Generated by the JDiff Javadoc doclet -->
- <!-- (http://www.jdiff.org) -->
- <!-- on Tue Aug 24 10:48:17 PDT 2010 -->
- <api
- xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
- xsi:noNamespaceSchemaLocation='api.xsd'
- name="hadoop-hdfs 0.21.0"
- jdversion="1.0.9">
- <!-- Command line arguments = -doclet jdiff.JDiff -docletpath /Users/tom/workspace/hadoop-hdfs-0.21-committer/build/ivy/lib/Hadoop-Hdfs/jdiff/jdiff-1.0.9.jar:/Users/tom/workspace/hadoop-hdfs-0.21-committer/build/ivy/lib/Hadoop-Hdfs/jdiff/xerces-1.4.4.jar -classpath /Users/tom/workspace/hadoop-hdfs-0.21-committer/build/classes:/Users/tom/workspace/hadoop-hdfs-0.21-committer/conf:/Users/tom/.ivy2/cache/org.apache.hadoop/hadoop-common/jars/hadoop-common-0.21.0-SNAPSHOT.jar:/Users/tom/.ivy2/cache/commons-cli/commons-cli/jars/commons-cli-1.2.jar:/Users/tom/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/Users/tom/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.1.jar:/Users/tom/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.4.jar:/Users/tom/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/Users/tom/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.14.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.14.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/servlet-api-2.5/jars/servlet-api-2.5-6.1.14.jar:/Users/tom/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/Users/tom/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/jsp-api-2.1/jars/jsp-api-2.1-6.1.14.jar:/Users/tom/.ivy2/cache/org.mortbay.jetty/jsp-2.1/jars/jsp-2.1-6.1.14.jar:/Users/tom/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/Users/tom/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar:/Users/tom/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/Users/tom/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.7.1.jar:/Users/tom/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.1.1.jar:/Users/tom/.ivy2/cache/net.sf.kosmosfs/kfs/jars/kfs-0.3.jar:/Users/tom/.ivy2/cache/junit/junit/jars/junit-4.8.1.jar:/Users/tom/.ivy2/cache/hsqldb/hsqldb/jars/hsqldb-1.8.0.10.jar:/Users/tom/.ivy2/cache/org.apache.
hadoop/avro/jars/avro-1.3.2.jar:/Users/tom/.ivy2/cache/org.codehaus.jackson/jackson-mapper-asl/jars/jackson-mapper-asl-1.4.2.jar:/Users/tom/.ivy2/cache/org.codehaus.jackson/jackson-core-asl/jars/jackson-core-asl-1.4.2.jar:/Users/tom/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.5.11.jar:/Users/tom/.ivy2/cache/com.thoughtworks.paranamer/paranamer/jars/paranamer-2.2.jar:/Users/tom/.ivy2/cache/com.thoughtworks.paranamer/paranamer-ant/jars/paranamer-ant-2.2.jar:/Users/tom/.ivy2/cache/com.thoughtworks.paranamer/paranamer-generator/jars/paranamer-generator-2.2.jar:/Users/tom/.ivy2/cache/com.thoughtworks.qdox/qdox/jars/qdox-1.10.1.jar:/Users/tom/.ivy2/cache/asm/asm/jars/asm-3.2.jar:/Users/tom/.ivy2/cache/commons-lang/commons-lang/jars/commons-lang-2.5.jar:/Users/tom/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar:/Users/tom/.ivy2/cache/org.aspectj/aspectjrt/jars/aspectjrt-1.6.5.jar:/Users/tom/.ivy2/cache/org.aspectj/aspectjtools/jars/aspectjtools-1.6.5.jar:/Users/tom/.ivy2/cache/org.mockito/mockito-all/jars/mockito-all-1.8.2.jar:/Users/tom/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/Users/tom/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/usr/share/ant/lib/ant-launcher.jar:/Users/tom/.ant/lib/ivy.jar:/usr/share/ant/lib/ant-antlr.jar:/usr/share/ant/lib/ant-jai.jar:/usr/share/ant/lib/ant-jmf.jar:/usr/share/ant/lib/ant-junit.jar:/usr/share/ant/lib/ant-nodeps.jar:/usr/share/ant/lib/ant-swing.jar:/usr/share/ant/lib/ant-testutil.jar:/usr/share/ant/lib/ant-trax.jar:/usr/share/ant/lib/ant.jar:/usr/share/ant/lib/ivy-2.1.0.jar:/usr/share/ant/lib/xercesImpl.jar:/usr/share/ant/lib/xml-apis.jar -sourcepath /Users/tom/workspace/hadoop-hdfs-0.21-committer/src/java -apidir /Users/tom/workspace/hadoop-hdfs-0.21-committer/lib/jdiff -apiname hadoop-hdfs 0.21.0 -->
- <package name="org.apache.hadoop.fs">
- <!-- start class org.apache.hadoop.fs.Hdfs -->
- <class name="Hdfs" extends="org.apache.hadoop.fs.AbstractFileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getUriDefaultPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="createInternal" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="createFlag" type="java.util.EnumSet"/>
- <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="bytesPerChecksum" type="int"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="start" type="long"/>
- <param name="len" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFsStatus" return="org.apache.hadoop.fs.FsStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="listStatusIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="mkdir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="dir" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="renameInternal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="renameInternal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <param name="overwrite" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setVerifyChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="verifyChecksum" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="supportsSymlinks" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="target" type="org.apache.hadoop.fs.Path"/>
- <param name="link" type="org.apache.hadoop.fs.Path"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- </class>
- <!-- end class org.apache.hadoop.fs.Hdfs -->
- </package>
- <package name="org.apache.hadoop.hdfs">
- <!-- start class org.apache.hadoop.hdfs.BlockMissingException -->
- <class name="BlockMissingException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockMissingException" type="java.lang.String, java.lang.String, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[An exception that indicates that file was corrupted.
- @param filename name of corrupted file
- @param description a description of the corruption details]]>
- </doc>
- </constructor>
- <method name="getFile" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the name of the corrupted file.
- @return name of corrupted file]]>
- </doc>
- </method>
- <method name="getOffset" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the offset at which this file is corrupted
- @return offset of corrupted file]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This exception is thrown when a read encounters a block that has no locations
- associated with it.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.BlockMissingException -->
- <!-- start class org.apache.hadoop.hdfs.BlockReader -->
- <class name="BlockReader" extends="org.apache.hadoop.fs.FSInputChecker"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="read" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="byte[]"/>
- <param name="off" type="int"/>
- <param name="len" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="skip" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="n" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="read" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="seekToNewSource" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="targetPos" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="seek"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getChunkPosition" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- </method>
- <method name="readChunk" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- <param name="buf" type="byte[]"/>
- <param name="offset" type="int"/>
- <param name="len" type="int"/>
- <param name="checksumBuf" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="blockId" type="long"/>
- <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <param name="genStamp" type="long"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="blockId" type="long"/>
- <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <param name="genStamp" type="long"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Java Doc required]]>
- </doc>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="blockId" type="long"/>
- <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <param name="genStamp" type="long"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readAll" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="byte[]"/>
- <param name="offset" type="int"/>
- <param name="len" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[kind of like readFully(). Only reads as much as possible.
- And allows use of protected readFully().]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This is a wrapper around connection to datadone
- and understands checksum, offset etc]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.BlockReader -->
- <!-- start class org.apache.hadoop.hdfs.DeprecatedUTF8 -->
- <class name="DeprecatedUTF8" extends="org.apache.hadoop.io.UTF8"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DeprecatedUTF8"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DeprecatedUTF8" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct from a given string.]]>
- </doc>
- </constructor>
- <constructor name="DeprecatedUTF8" type="org.apache.hadoop.hdfs.DeprecatedUTF8"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct from a given string.]]>
- </doc>
- </constructor>
- <method name="readString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="writeString" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <param name="s" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A simple wrapper around {@link org.apache.hadoop.io.UTF8}.
- This class should be used only when it is absolutely necessary
- to use {@link org.apache.hadoop.io.UTF8}. The only difference is that
- using this class does not require "@SuppressWarning" annotation to avoid
- javac warning. Instead the deprecation is implied in the class name.
-
- This should be treated as package private class to HDFS.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DeprecatedUTF8 -->
- <!-- start class org.apache.hadoop.hdfs.DFSClient -->
- <class name="DFSClient" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="java.io.Closeable"/>
- <constructor name="DFSClient" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="Deprecated at 0.21">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as this(NameNode.getAddress(conf), conf);
- @see #DFSClient(InetSocketAddress, Configuration)
- @deprecated Deprecated at 0.21]]>
- </doc>
- </constructor>
- <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as this(nameNodeAddr, conf, null);
- @see #DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)]]>
- </doc>
- </constructor>
- <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem.Statistics"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as this(nameNodeAddr, null, conf, stats);
- @see #DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)]]>
- </doc>
- </constructor>
- <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The locking hierarchy is to first acquire lock on DFSClient object, followed by
- lock on leasechecker, followed by lock on an individual DFSOutputStream.]]>
- </doc>
- </method>
- <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nameNodeAddr" type="java.net.InetSocketAddress"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Close the file system, abandoning all of the leases and files being
- created and close connections to the namenode.]]>
- </doc>
- </method>
- <method name="getDefaultBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the default block size for this cluster
- @return the default block size in bytes]]>
- </doc>
- </method>
- <method name="getBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get server default values for a number of configuration params.]]>
- </doc>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Report corrupt blocks that were discovered by the client.]]>
- </doc>
- </method>
- <method name="getDefaultReplication" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="start" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get block location info about file
-
- getBlockLocations() returns a list of hostnames that store
- data for a specific file region. It returns a set of hostnames
- for every block within the indicated region.
- This function is very useful when writing code that considers
- data-placement when performing operations. For example, the
- MapReduce system tries to schedule tasks on the same machines
- as the data-block the task processes.]]>
- </doc>
- </method>
- <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link #open(String, int, boolean)} instead.">
- <param name="src" type="java.lang.String"/>
- <param name="buffersize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <param name="stats" type="org.apache.hadoop.fs.FileSystem.Statistics"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create an input stream that obtains a nodelist from the
- namenode, and then reads from all the right places. Creates
- inner subclass of InputStream that does the right out-of-band
- work.
- @deprecated Use {@link #open(String, int, boolean)} instead.]]>
- </doc>
- </method>
- <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="buffersize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create an input stream that obtains a nodelist from the
- namenode, and then reads from all the right places. Creates
- inner subclass of InputStream that does the right out-of-band
- work.]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a new dfs file and return an output stream for writing into it.
-
- @param src stream name
- @param overwrite do not check for file existence if true
- @return output stream
- @throws UnresolvedLinkException if a symlink is encountered in src.
- @throws IOException]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a new dfs file and return an output stream for writing into it
- with write-progress reporting.
-
- @param src stream name
- @param overwrite do not check for file existence if true
- @return output stream
- @throws UnresolvedLinkException if a symlink is encountered in src.
- @throws IOException]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a new dfs file with the specified block replication
- and return an output stream for writing into the file.
-
- @param src stream name
- @param overwrite do not check for file existence if true
- @param replication block replication
- @return output stream
- @throws UnresolvedLinkException if a symlink is encountered in src.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the namenode associated with this DFSClient object
- @return the namenode associated with this DFSClient object]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a new dfs file with the specified block replication
- with write-progress reporting and return an output stream for writing
- into the file.
-
- @param src stream name
- @param overwrite do not check for file existence if true
- @param replication block replication
- @return output stream
- @throws UnresolvedLinkException if a symlink is encountered in src.
- @throws IOException]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Call
- {@link #create(String,FsPermission,EnumSet,short,long,Progressable,int)}
- with default permission.
- @see FsPermission#getDefault()]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Call
- {@link #create(String,FsPermission,EnumSet,boolean,short,long,Progressable,int)}
- with createParent set to true.]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="createParent" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a new dfs file with the specified block replication
- with write-progress reporting and return an output stream for writing
- into the file.
-
- @param src stream name
- @param permission The permission of the directory being created.
- If permission == null, use {@link FsPermission#getDefault()}.
- @param flag do not check for file existence if true
- @param createParent create missing parent directory if true
- @param replication block replication
- @return output stream
- @throws IOException
- @throws UnresolvedLinkException if src contains a symlink.
- @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable, boolean, short, long)]]>
- </doc>
- </method>
- <method name="primitiveCreate" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="absPermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="createParent" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <param name="bytesPerChecksum" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
 - <![CDATA[Same as {@link #create(String, FsPermission, EnumSet, short, long,
 - Progressable, int)} except that the permission
 - is absolute (i.e. has already been masked with umask).]]>
- </doc>
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="link" type="java.lang.String"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Creates a symbolic link.
-
- @see ClientProtocol#createSymlink(String, String,FsPermission, boolean)]]>
- </doc>
- </method>
- <method name="getLinkTarget" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Resolve the *first* symlink, if any, in the path.
-
- @see ClientProtocol#getLinkTarget(String)]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
-
- @see ClientProtocol#setReplication(String, short)
- @param replication
- @throws IOException
 - @return true if successful or false if the file does not exist]]>
- </doc>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link #rename(String, String, Options.Rename...)} instead.">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Rename file or directory.
- See {@link ClientProtocol#rename(String, String)}.
- @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.]]>
- </doc>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="trg" type="java.lang.String"/>
- <param name="srcs" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Move blocks from src to trg and delete src
- See {@link ClientProtocol#concat(String, String [])}.]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Rename file or directory.
- See {@link ClientProtocol#rename(String, String, Options.Rename...)}]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Delete file or directory.
- See {@link ClientProtocol#delete(String)}.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[delete file or directory.
- delete contents of the directory if non empty and recursive
- set to true]]>
- </doc>
- </method>
- <method name="exists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Implemented using getFileInfo(src)]]>
- </doc>
- </method>
- <method name="listPaths" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get a partial listing of the indicated directory
- Recommend to use HdfsFileStatus.EMPTY_NAME as startAfter
- if the application wants to fetch a listing starting from
- the first entry in the directory
- @param src the directory name
- @param startAfter the name to start listing after encoded in java UTF8
- @return a partial listing starting after startAfter]]>
- </doc>
- </method>
- <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get the file info for a specific file or directory. If src
- refers to a symlink then the FileStatus of the link is returned.
- @param src path to a file or directory.
- @throws IOException
- @throws UnresolvedLinkException if the path contains symlinks
- @return FileStatus describing src.]]>
- </doc>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the checksum of a file.
- @param src The file path
- @return The checksum
- @see DistributedFileSystem#getFileChecksum(Path)]]>
- </doc>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <param name="socketFactory" type="javax.net.SocketFactory"/>
- <param name="socketTimeout" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the checksum of a file.
- @param src The file path
- @return The checksum]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set permissions to a file or directory.
- @param src path name.
- @param permission
 - @throws <code>FileNotFoundException</code> if the file does not exist.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set file or directory owner.
- @param src path name.
- @param username user id.
- @param groupname user group.
 - @throws <code>FileNotFoundException</code> if the file does not exist.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="getDiskStatus" return="org.apache.hadoop.fs.FsStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with no good replicas left. Normally should be
- zero.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
 - <![CDATA[Returns count of blocks with one or more replicas missing.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCorruptBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with at least one replica marked corrupt.
- @throws IOException]]>
- </doc>
- </method>
- <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
- See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)}
- for more details.
-
- @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the hosts and exclude files. (Rereads them.)
- See {@link ClientProtocol#refreshNodes()}
- for more details.
-
- @see ClientProtocol#refreshNodes()]]>
- </doc>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps DFS data structures into specified file.
- See {@link ClientProtocol#metaSave(String)}
- for more details.
-
- @see ClientProtocol#metaSave(String)]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#finalizeUpgrade()]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a directory (or hierarchy of directories) with the given
- name and permission.
- @param src The path of the directory being created
- @param permission The permission of the directory being created.
- If permission == null, use {@link FsPermission#getDefault()}.
- @param createParent create missing parent directory if true
- @return True if the operation success.
- @throws UnresolvedLinkException if the path contains a symlink.
- @see ClientProtocol#mkdirs(String, FsPermission, boolean)]]>
- </doc>
- </method>
- <method name="primitiveMkdir" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="absPermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
 - <![CDATA[Same as {@link #mkdirs(String, FsPermission, boolean)} except
- that the permissions has already been masked against umask.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[set the modification and access time of a file
- @throws FileNotFoundException if the path is not a file]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SERVER_DEFAULTS_VALIDITY_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_BLOCK_ACQUIRE_FAILURES" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DFSClient can connect to a Hadoop Filesystem and
- perform basic file tasks. It uses the ClientProtocol
- to communicate with a NameNode daemon, and connects
- directly to DataNodes to read/write block data.
- Hadoop DFS users should obtain an instance of
- DistributedFileSystem, which uses DFSClient to handle
- filesystem tasks.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSClient -->
- <!-- start class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
- <class name="DFSClient.DFSDataInputStream" extends="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSClient.DFSDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the datanode from which the stream is currently reading.]]>
- </doc>
- </method>
- <method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the block containing the target position.]]>
- </doc>
- </method>
- <method name="getVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@return The visible length of the file.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[The Hdfs implementation of {@link FSDataInputStream}]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
- <!-- start class org.apache.hadoop.hdfs.DFSConfigKeys -->
- <class name="DFSConfigKeys" extends="org.apache.hadoop.fs.CommonConfigurationKeys"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSConfigKeys"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <field name="DFS_BLOCK_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_SIZE_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_REPLICATION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_REPLICATION_DEFAULT" type="short"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_STREAM_BUFFER_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_STREAM_BUFFER_SIZE_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BYTES_PER_CHECKSUM_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BYTES_PER_CHECKSUM_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_WRITE_PACKET_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HTTP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_MAX_OBJECTS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_MAX_OBJECTS_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_EXTENSION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT" type="float"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_PERIOD_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_UPGRADE_PERMISSION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_HTTPS_NEED_AUTH_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_ACCESSTIME_PRECISION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_MIN_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_MIN_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_PERMISSIONS_ENABLED_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_PERMISSIONS_ENABLED_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_PERMISSIONS_SUPERUSERGROUP_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_NAME_DIR_RESTORE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_LIST_LIMIT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_LIST_LIMIT_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DATA_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HTTPS_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_NAME_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_EDITS_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_READ_PREFETCH_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_RETRY_WINDOW_BASE" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_METRICS_SESSION_ID_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HOST_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_STORAGEID_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HOSTS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HOSTS_EXCLUDE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_SOCKET_TIMEOUT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BALANCER_MOVEDWINWIDTH_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BALANCER_MOVEDWINWIDTH_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DATA_DIR_PERMISSION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DNS_INTERFACE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DNS_INTERFACE_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DNS_NAMESERVER_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DNS_NAMESERVER_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DU_RESERVED_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DU_RESERVED_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HANDLER_COUNT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HANDLER_COUNT_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HTTP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_MAX_XCIEVERS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_MAX_XCIEVERS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_NUMBLOCKS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_NUMBLOCKS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SCAN_PERIOD_HOURS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_TRANSFERTO_ALLOWED_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_HEARTBEAT_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_HEARTBEAT_INTERVAL_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HANDLER_COUNT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HANDLER_COUNT_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SUPPORT_APPEND_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SUPPORT_APPEND_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_HTTPS_ENABLE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_HTTPS_ENABLE_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HTTPS_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HTTPS_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_IPC_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_IPC_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_REPLICATION_MAX_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_REPLICATION_MAX_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DF_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DF_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCKREPORT_INTERVAL_MSEC_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCKREPORT_INITIAL_DELAY_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_PLUGINS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_STARTUP_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_PLUGINS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_WEB_UGI_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_STARTUP_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_KEYTAB_FILE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_USER_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_KEYTAB_FILE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_USER_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class contains constants for configuration keys used
- in HDFS.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSConfigKeys -->
- <!-- start class org.apache.hadoop.hdfs.DFSUtil -->
- <class name="DFSUtil" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSUtil"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="isValidName" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <doc>
- <![CDATA[Whether the pathname is valid. Currently prohibits relative paths,
- and names which contain a ":" or "/"]]>
- </doc>
- </method>
- <method name="login"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="keytabFileKey" type="java.lang.String"/>
- <param name="userNameKey" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[If a keytab has been provided, login as that user.]]>
- </doc>
- </method>
- <method name="bytes2String" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="bytes" type="byte[]"/>
- <doc>
- <![CDATA[Converts a byte array to a string using UTF8 encoding.]]>
- </doc>
- </method>
- <method name="string2Bytes" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="str" type="java.lang.String"/>
- <doc>
- <![CDATA[Converts a string to a byte array using UTF8 encoding.]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSUtil -->
- <!-- start class org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator -->
- <class name="DFSUtil.ErrorSimulator" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSUtil.ErrorSimulator"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="initializeErrorSimulationEvent"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="numberOfEvents" type="int"/>
- </method>
- <method name="getErrorSimulation" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- </method>
- <method name="setErrorSimulation"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- </method>
- <method name="clearErrorSimulation"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- </method>
- <doc>
- <![CDATA[Utility class to facilitate junit test error simulation.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator -->
- <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem -->
- <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DistributedFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="uri" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="checkPath"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="org.apache.hadoop.fs.Path"/>
- <doc>
- <![CDATA[Permit paths which explicitly specify the default port.]]>
- </doc>
- </method>
- <method name="makeQualified" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="org.apache.hadoop.fs.Path"/>
- <doc>
- <![CDATA[Normalize paths that explicitly specify the default port.]]>
- </doc>
- </method>
- <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDefaultBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDefaultReplication" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setWorkingDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dir" type="org.apache.hadoop.fs.Path"/>
- </method>
- <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
- <param name="start" type="long"/>
- <param name="len" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="start" type="long"/>
- <param name="len" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setVerifyChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="verifyChecksum" type="boolean"/>
- </method>
- <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This optional operation is not yet supported.]]>
- </doc>
- </method>
- <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="overwrite" type="boolean"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="primitiveCreate" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="bytesPerChecksum" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as create(), except fails if parent directory doesn't already exist.]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="trg" type="org.apache.hadoop.fs.Path"/>
- <param name="psrcs" type="org.apache.hadoop.fs.Path[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[THIS IS A DFS-only operation; it is not part of FileSystem.
- Moves blocks from the source files (psrcs) to the target file (trg)
- and deletes the sources afterwards.
- All blocks should be the same size.
- @param trg existing file to append to
- @param psrcs list of files (same block size, same replication)
- @throws IOException]]>
- </doc>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}
- This rename operation is guaranteed to be atomic.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set a directory's quotas
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)]]>
- </doc>
- </method>
- <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[List all the entries of a directory
- Note that this operation is not atomic for a large directory.
- The entries of a directory may be fetched from NameNode multiple times.
- It only guarantees that each name occurs once if a directory
- undergoes changes between the calls.]]>
- </doc>
- </method>
- <method name="mkdir" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a directory with given name and permission, only when
- parent directory exists.]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="primitiveMkdir" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getClient" return="org.apache.hadoop.hdfs.DFSClient"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStatus" return="org.apache.hadoop.fs.FsStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the disk usage of the filesystem, including total capacity,
- used space, and remaining space
- @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead]]>
- </doc>
- </method>
- <method name="getRawCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total raw capacity of the filesystem, disregarding
- replication.
- @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead]]>
- </doc>
- </method>
- <method name="getRawUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total raw used space in the filesystem, disregarding
- replication.
- @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead]]>
- </doc>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with no good replicas left. Normally should be
- zero.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with one or more replicas missing.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCorruptBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with at least one replica marked corrupt.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return statistics for each datanode.]]>
- </doc>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
- FSConstants.SafeModeAction)]]>
- </doc>
- </method>
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save namespace image.
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
- </doc>
- </method>
- <method name="restoreFailedStorage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="arg" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <doc>
- <![CDATA[Enable/disable/check restoreFailedStorage
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refreshes the list of hosts and excluded hosts from the configured
- files.]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalize previously upgraded files system state.
- @throws IOException]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="reportChecksumFailure" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
- <param name="inPos" type="long"/>
- <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
- <param name="sumsPos" type="long"/>
- <doc>
- <![CDATA[We need to find the blocks that didn't match. Likely only one
- is corrupt but we will report both to the namenode. In the future,
- we can consider figuring out exactly which block is corrupt.]]>
- </doc>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the stat information about the file.
- @throws FileNotFoundException if the file does not exist.]]>
- </doc>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a valid Delegation Token.
-
- @param renewer Name of the designated renewer for the token
- @return Token<DelegationTokenIdentifier>
- @throws IOException]]>
- </doc>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Renew an existing delegation token.
-
- @param token delegation token obtained earlier
- @return the new expiration time
- @throws IOException]]>
- </doc>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Cancel an existing delegation token.
-
- @param token delegation token
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Implementation of the abstract FileSystem for the DFS system.
- This object is the way end-user code interacts with a Hadoop
- DistributedFileSystem.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem -->
- <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
- <class name="DistributedFileSystem.DiskStatus" extends="org.apache.hadoop.fs.FsStatus"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="Use {@link org.apache.hadoop.fs.FsStatus} instead">
- <constructor name="DistributedFileSystem.DiskStatus" type="org.apache.hadoop.fs.FsStatus"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[@deprecated Use {@link org.apache.hadoop.fs.FsStatus} instead]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
- <!-- start class org.apache.hadoop.hdfs.HdfsConfiguration -->
- <class name="HdfsConfiguration" extends="org.apache.hadoop.conf.Configuration"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HdfsConfiguration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="HdfsConfiguration" type="boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="HdfsConfiguration" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Adds deprecated keys into the configuration.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HdfsConfiguration -->
- <!-- start class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
- <class name="HDFSPolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HDFSPolicyProvider"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[{@link PolicyProvider} for HDFS protocols.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
- <!-- start class org.apache.hadoop.hdfs.HftpFileSystem -->
- <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HftpFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDateFormat" return="java.text.SimpleDateFormat"
- abstract="false" native="false" synchronized="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="openConnection" return="java.net.HttpURLConnection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="query" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
- @param path The path component of the URL
- @param query The query component of the URL]]>
- </doc>
- </method>
- <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setWorkingDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- </method>
- <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This optional operation is not yet supported.]]>
- </doc>
- </method>
- <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="overwrite" type="boolean"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="nnAddr" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ran" type="java.util.Random"
- transient="false" volatile="false"
- static="false" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="HFTP_TIMEZONE" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HFTP_DATE_FORMAT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="df" type="java.lang.ThreadLocal"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
- The following implementation provides a limited, read-only interface
- to a filesystem over HTTP.
- @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
- @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HftpFileSystem -->
- <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem -->
- <class name="HsftpFileSystem" extends="org.apache.hadoop.hdfs.HftpFileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HsftpFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="openConnection" return="java.net.HttpURLConnection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="query" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS. The
- following implementation provides a limited, read-only interface to a
- filesystem over HTTPS.
-
- @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
- @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem -->
- <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
- <class name="HsftpFileSystem.DummyHostnameVerifier" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <implements name="javax.net.ssl.HostnameVerifier"/>
- <constructor name="HsftpFileSystem.DummyHostnameVerifier"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="verify" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="hostname" type="java.lang.String"/>
- <param name="session" type="javax.net.ssl.SSLSession"/>
- </method>
- <doc>
- <![CDATA[Dummy hostname verifier that is used to bypass hostname checking]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
- <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyTrustManager -->
- <class name="HsftpFileSystem.DummyTrustManager" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <implements name="javax.net.ssl.X509TrustManager"/>
- <constructor name="HsftpFileSystem.DummyTrustManager"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="checkClientTrusted"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="chain" type="java.security.cert.X509Certificate[]"/>
- <param name="authType" type="java.lang.String"/>
- </method>
- <method name="checkServerTrusted"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="chain" type="java.security.cert.X509Certificate[]"/>
- <param name="authType" type="java.lang.String"/>
- </method>
- <method name="getAcceptedIssuers" return="java.security.cert.X509Certificate[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Dummy trustmanager that is used to trust all server certificates]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyTrustManager -->
- <doc>
- <![CDATA[<p>A distributed implementation of {@link
- org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
- Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
- <p>The most important difference is that unlike GFS, Hadoop DFS files
- have strictly one writer at any one time. Bytes are always appended
- to the end of the writer's stream. There is no notion of "record appends"
- or "mutations" that are then checked or reordered. Writers simply emit
- a byte stream. That byte stream is guaranteed to be stored in the
- order written.</p>]]>
- </doc>
- </package>
- <package name="org.apache.hadoop.hdfs.protocol">
- <!-- start class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
- <class name="AlreadyBeingCreatedException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="AlreadyBeingCreatedException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception that happens when you ask to create a file that already
- is being created, but is not closed yet.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.Block -->
- <class name="Block" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <implements name="java.lang.Comparable"/>
- <constructor name="Block"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="long, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="org.apache.hadoop.hdfs.protocol.Block"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="java.io.File, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Find the blockid from the given filename]]>
- </doc>
- </constructor>
- <method name="isBlockFilename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="java.io.File"/>
- </method>
- <method name="filename2id" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="isMetaFilename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="metaFile" type="java.lang.String"/>
- <doc>
- <![CDATA[Get generation stamp from the name of the metafile name]]>
- </doc>
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="metaFile" type="java.lang.String"/>
- <doc>
- <![CDATA[Get the blockId from the name of the metafile name]]>
- </doc>
- </method>
- <method name="set"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <param name="len" type="long"/>
- <param name="genStamp" type="long"/>
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setBlockId"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="bid" type="long"/>
- </method>
- <method name="getBlockName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNumBytes" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setNumBytes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="len" type="long"/>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setGenerationStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="BLOCK_FILE_PREFIX" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="METADATA_EXTENSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockFilePattern" type="java.util.regex.Pattern"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="metaFilePattern" type="java.util.regex.Pattern"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[A Block is a Hadoop FS primitive, identified by a
- long.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.Block -->
- <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
- <class name="BlockListAsLongs" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Iterable"/>
- <constructor name="BlockListAsLongs" type="java.util.List, java.util.List"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create block report from finalized and under construction lists of blocks.
-
- @param finalized - list of finalized blocks
- @param uc - list of under construction blocks]]>
- </doc>
- </constructor>
- <constructor name="BlockListAsLongs"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockListAsLongs" type="long[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param iBlockList - BlockListALongs create from this long[] parameter]]>
- </doc>
- </constructor>
- <method name="getBlockListAsLongs" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="iterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns an iterator over blocks in the block report.]]>
- </doc>
- </method>
- <method name="getBlockReportIterator" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns {@link BlockReportIterator}.]]>
- </doc>
- </method>
- <method name="getNumberOfBlocks" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The number of blocks
- @return - the number of blocks]]>
- </doc>
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The block-id of the indexTh block
- @param index - the block whose block-id is desired
- @return the block-id]]>
- </doc>
- </method>
- <method name="getBlockLen" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The block-len of the indexTh block
- @param index - the block whose block-len is desired
- @return - the block-len]]>
- </doc>
- </method>
- <method name="getBlockGenStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The generation stamp of the indexTh block
- @param index - the block whose block-len is desired
- @return - the generation stamp]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class provides an interface for accessing list of blocks that
- has been implemented as long[].
- This class is useful for block report. Rather than send block reports
- as a Block[] we can send it as a long[].
- The structure of the array is as follows:
- 0: the length of the finalized replica list;
- 1: the length of the under-construction replica list;
- - followed by finalized replica list where each replica is represented by
- 3 longs: one for the blockId, one for the block length, and one for
- the generation stamp;
- - followed by the invalid replica represented with three -1s;
- - followed by the under-construction replica list where each replica is
- represented by 4 longs: three for the block id, length, generation
- stamp, and the forth for the replica state.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
- <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator -->
- <class name="BlockListAsLongs.BlockReportIterator" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.util.Iterator"/>
- <method name="hasNext" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="next" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="remove"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCurrentReplicaState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the state of the current replica.
- The state corresponds to the replica returned
- by the latest {@link #next()}.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Iterates over blocks in the block report.
- Avoids object allocation on each iteration.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
- <interface name="ClientDatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getReplicaVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the visible length of a replica.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[6: recoverBlock() removed.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[An client-datanode protocol for block recovery]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
- <interface name="ClientProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <doc>
- <![CDATA[Get locations of the blocks of the specified file within the specified range.
- DataNode locations for each block are sorted by
- the proximity to the client.
- <p>
- Return {@link LocatedBlocks} which contains
- file length, blocks and their locations.
- DataNode locations for each block are sorted by
- the distance to the client's address.
- <p>
- The client will then have to contact
- one of the indicated DataNodes to obtain the actual data.
-
- @param src file name
- @param offset range start offset
- @param length range length
- @return file length and array of blocks with their locations
- @throws IOException
- @throws UnresolvedLinkException if the path contains a symlink.
- @throws FileNotFoundException if the path does not exist.]]>
- </doc>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get server default values for a number of configuration params.
- @return a set of server default configuration values
- @throws IOException]]>
- </doc>
- </method>
- <method name="create"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
- <param name="createParent" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="AlreadyBeingCreatedException" type="org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException"/>
- <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
- <doc>
- <![CDATA[Create a new file entry in the namespace.
- <p>
- This will create an empty file specified by the source path.
- The path should reflect a full path originated at the root.
- The name-node does not have a notion of "current" directory for a client.
- <p>
- Once created, the file is visible and available for read to other clients.
- Although, other clients cannot {@link #delete(String, boolean)}, re-create or
- {@link #rename(String, String)} it until the file is completed
- or explicitly as a result of lease expiration.
- <p>
- Blocks have a maximum size. Clients that intend to create
- multi-block files must also use {@link #addBlock(String, String, Block, DatanodeInfo[])}.
- @param src path of the file being created.
- @param masked masked permission.
- @param clientName name of the current client.
- @param flag indicates whether the file should be
- overwritten if it already exists or create if it does not exist or append.
- @param createParent create missing parent directory if true
- @param replication block replication factor.
- @param blockSize maximum block size.
-
- @throws AccessControlException if permission to create file is
- denied by the system. As usually on the client side the exception will
- be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
- @throws QuotaExceededException if the file creation violates
- any quota restriction
- @throws IOException if other errors occur.
- @throws UnresolvedLinkException if the path contains a symlink.
- @throws AlreadyBeingCreatedException if the file is already being created by another client.
- @throws NSQuotaExceededException if the namespace quota is exceeded.]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Append to the end of the file.
- @param src path of the file being created.
- @param clientName name of the current client.
- @return information about the last partial block if any.
- @throws AccessControlException if permission to append file is
- denied by the system. As usually on the client side the exception will
- be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
- Allows appending to an existing file if the server is
- configured with the parameter dfs.support.append set to true, otherwise
- throws an IOException.
- @throws IOException if other errors occur.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
- <p>
- The NameNode sets replication to the new value and returns.
- The actual block replication is not expected to be performed during
- this method call. The blocks will be populated or removed in the
- background as the result of the routine block maintenance procedures.
-
- @param src file name
- @param replication new replication
- @throws IOException
- @return true if successful;
- false if file does not exist or is a directory
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <doc>
- <![CDATA[Set permissions for an existing file/directory.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set Owner of a path (i.e. a file or a directory).
- The parameters username and groupname cannot both be null.
- @param src
- @param username If it is null, the original username remains unchanged.
- @param groupname If it is null, the original groupname remains unchanged.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="abandonBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[The client can give up on a block by calling abandonBlock().
- The client can then either obtain a new block, or complete or
- abandon the file. Any partial writes to the block will be discarded.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="excludedNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
- <doc>
- <![CDATA[A client that wants to write an additional block to the
- indicated filename (which must currently be open for writing)
- should call addBlock().
- addBlock() allocates a new block and determines the set of datanodes
- the block data should be replicated to.
-
- addBlock() also commits the previous block by reporting
- to the name-node the actual generation stamp and the length
- of the block that the client has transmitted to data-nodes.
- @param src the file being created
- @param clientName the name of the client that adds the block
- @param previous previous block
- @param excludedNodes a list of nodes that should not be
- allocated for the current block
- @return LocatedBlock allocated block information.
- @throws UnresolvedLinkException if the path contains a symlink.
- @throws DSQuotaExceededException if the directory's quota is exceeded.]]>
- </doc>
- </method>
- <method name="complete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[The client is done writing data to the given filename, and would
- like to complete it.
- The function returns whether the file has been closed successfully.
- If the function returns false, the caller should try again.
-
- close() also commits the last block of the file by reporting
- to the name-node the actual generation stamp and the length
- of the block that the client has transmitted to data-nodes.
- A call to complete() will not return true until all the file's
- blocks have been replicated the minimum number of times. Thus,
- DataNode failures may cause a client to call complete() several
- times before succeeding.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client wants to report corrupted blocks (blocks with specified
- locations on datanodes).
- @param blocks Array of located blocks to report]]>
- </doc>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link #rename(String, String, Options.Rename...)} instead.">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Rename an item in the file system namespace.
- @param src existing file or directory name.
- @param dst new name.
- @return true if successful, or false if the old name does not exist
- or if the new name already belongs to the namespace.
- @throws IOException if the new name is invalid.
- @throws UnresolvedLinkException if the path contains a symlink.
- @throws QuotaExceededException if the rename would violate
- any quota restriction
- @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.]]>
- </doc>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="trg" type="java.lang.String"/>
- <param name="srcs" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Moves blocks from srcs to trg and delete srcs
-
- @param trg existing file
- @param srcs - list of existing files (same block size, same replication)
- @throws IOException if some arguments are invalid
- @throws UnresolvedLinkException if the path contains a symlink.
- @throws QuotaExceededException if the concat operation would violate
- any quota restriction
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Rename src to dst.
- <ul>
- <li>Fails if src is a file and dst is a directory.
- <li>Fails if src is a directory and dst is a file.
- <li>Fails if the parent of dst does not exist or is a file.
- </ul>
- <p>
- Without OVERWRITE option, rename fails if the dst already exists.
- With OVERWRITE option, rename overwrites the dst, if it is a file
- or an empty directory. Rename fails if dst is a non-empty directory.
- <p>
- This implementation of rename is atomic.
- <p>
- @param src existing file or directory name.
- @param dst new name.
- @param options Rename options
- @throws IOException if rename failed
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="use {@link #delete(String, boolean)} instead.">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Delete the given file or directory from the file system.
- <p>
- Any blocks belonging to the deleted files will be garbage-collected.
-
- @param src existing name.
- @return true only if the existing file or directory was actually removed
- from the file system.
- @throws UnresolvedLinkException if the path contains a symlink.
- @deprecated use {@link #delete(String, boolean)} instead.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Delete the given file or directory from the file system.
- <p>
- same as delete but provides a way to avoid accidentally
- deleting non empty directories programmatically.
- @param src existing name
- @param recursive if true deletes a non empty directory recursively,
- else throws an exception.
- @return true only if the existing file or directory was actually removed
- from the file system.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
- <doc>
- <![CDATA[Create a directory (or hierarchy of directories) with the given
- name and permission.
- @param src The path of the directory being created
- @param masked The masked permission of the directory being created
- @param createParent create missing parent directory if true
- @return True if the operation success.
- @throws UnresolvedLinkException if the path contains a symlink.
- @throws AccessControlException if permission to create file is
- denied by the system. As usually on the client side the exception will
- be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
- @throws QuotaExceededException if the operation would violate
- any quota restriction.]]>
- </doc>
- </method>
- <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get a partial listing of the indicated directory
- @param src the directory name
- @param startAfter the name to start listing after encoded in java UTF8
- @return a partial listing starting after startAfter
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="renewLease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Client programs can cause stateful changes in the NameNode
- that affect other clients. A client may obtain a file and
- neither abandon nor complete it. A client might hold a series
- of locks that prevent other clients from proceeding.
- Clearly, it would be bad if a client held a bunch of locks
- that it never gave up. This can happen easily if the client
- dies unexpectedly.
- <p>
- So, the NameNode will revoke the locks and live file-creates
- for clients that it thinks have died. A client tells the
- NameNode that it is still alive by periodically calling
- renewLease(). If a certain amount of time passes since
- the last call to renewLease(), the NameNode assumes the
- client has died.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="getStats" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a set of statistics about the filesystem.
- Right now, the following values are returned.
- <ul>
- <li> [0] contains the total storage capacity of the system, in bytes.</li>
- <li> [1] contains the total used space of the system, in bytes.</li>
- <li> [2] contains the available storage of the system, in bytes.</li>
- <li> [3] contains number of under replicated blocks in the system.</li>
- <li> [4] contains number of blocks with a corrupt replica. </li>
- <li> [5] contains number of blocks without any good replicas left. </li>
- </ul>
- Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
- actual numbers to index into the array.]]>
- </doc>
- </method>
- <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a report on the system's current datanodes.
- One DatanodeInfo object is returned for each DataNode.
- Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
- otherwise all datanodes if type is ALL.]]>
- </doc>
- </method>
- <method name="getPreferredBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get the block size for the given file.
- @param filename The name of the file
- @return The number of bytes in each block
- @throws IOException
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
- <p>
- Safe mode is a name node state when it
- <ol><li>does not accept changes to name space (read-only), and</li>
- <li>does not replicate or delete blocks.</li></ol>
-
- <p>
- Safe mode is entered automatically at name node startup.
- Safe mode can also be entered manually using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
- <p>
- At startup the name node accepts data node reports collecting
- information about block locations.
- In order to leave safe mode it needs to collect a configurable
- percentage called threshold of blocks, which satisfy the minimal
- replication condition.
- The minimal replication condition is that each block must have at least
- <tt>dfs.namenode.replication.min</tt> replicas.
- When the threshold is reached the name node extends safe mode
- for a configurable amount of time
- to let the remaining data nodes to check in before it
- will start replicating missing blocks.
- Then the name node leaves safe mode.
- <p>
- If safe mode is turned on manually using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
- then the name node stays in safe mode until it is manually turned off
- using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
- Current state of the name node can be verified using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
- <h4>Configuration parameters:</h4>
- <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
- <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
- <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
-
- <h4>Special cases:</h4>
- The name node does not enter safe mode at startup if the threshold is
- set to 0 or if the name space is empty.<br>
- If the threshold is set to 1 then all blocks need to have at least
- minimal replication.<br>
- If the threshold value is greater than 1 then the name node will not be
- able to turn off safe mode automatically.<br>
- Safe mode can always be turned off manually.
-
- @param action <ul> <li>0 leave safe mode;</li>
- <li>1 enter safe mode;</li>
- <li>2 get safe mode state.</li></ul>
- @return <ul><li>0 if the safe mode is OFF or</li>
- <li>1 if the safe mode is ON.</li></ul>
- @throws IOException]]>
- </doc>
- </method>
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save namespace image.
- <p>
- Saves current namespace into storage directories and reset edits log.
- Requires superuser privilege and safe mode.
-
- @throws AccessControlException if the superuser privilege is violated.
- @throws IOException if image creation failed.]]>
- </doc>
- </method>
- <method name="restoreFailedStorage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="arg" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <doc>
- <![CDATA[Enable/Disable restore failed storage.
- <p>
- sets flag to enable restore of failed storage replicas
-
- @throws AccessControlException if the superuser privilege is violated.]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Tells the namenode to reread the hosts and exclude files.
- @throws IOException]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalize previous upgrade.
- Remove file system state saved during the upgrade.
- The upgrade will become irreversible.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Report distributed upgrade progress or force current upgrade to proceed.
-
- @param action {@link FSConstants.UpgradeAction} to perform
- @return upgrade status information or null if no upgrades are in progress
- @throws IOException]]>
- </doc>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps namenode data structures into specified file. If the file
- already exists, then append.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCorruptFiles" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@return Array of FileStatus objects referring to corrupted files.
- The server could return all or a few of the files that are corrupt.
- @throws AccessControlException
- @throws IOException]]>
- </doc>
- </method>
- <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get the file info for a specific file or directory.
- @param src The string representation of the path to the file
- @throws UnresolvedLinkException if the path contains symlinks;
- IOException if permission to access file is denied by the system
- @return object containing information regarding the file
- or null if file not found]]>
- </doc>
- </method>
- <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get the file info for a specific file or directory. If the path
- refers to a symlink then the FileStatus of the symlink is returned.
- @param src The string representation of the path to the file
- @throws UnresolvedLinkException if the path contains symlinks;
- IOException if permission to access file is denied by the system
- @return object containing information regarding the file
- or null if file not found]]>
- </doc>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get {@link ContentSummary} rooted at the specified directory.
- @param path The string representation of the path
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <doc>
- <![CDATA[Set the quota for a directory.
- @param path The string representation of the path to the directory
- @param namespaceQuota Limit on the number of names in the tree rooted
- at the directory
- @param diskspaceQuota Limit on disk space occupied all the files under
- this directory.
- <br><br>
-
- The quota can have three types of values : (1) 0 or more will set
- the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
- the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
- implies the quota will be reset. Any other value is a runtime error.
- @throws UnresolvedLinkException if the path contains a symlink.
- @throws FileNotFoundException if the path is a file or
- does not exist
- @throws QuotaExceededException if the directory size
- is greater than the given quota]]>
- </doc>
- </method>
- <method name="fsync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="client" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Write all metadata for this file into persistent storage.
- The file must be currently open for writing.
- @param src The string representation of the path
- @param client The string representation of the client
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Sets the modification and access time of the file to the specified time.
- @param src The string representation of the path
- @param mtime The number of milliseconds since Jan 1, 1970.
- Setting mtime to -1 means that modification time should not be set
- by this call.
- @param atime The number of milliseconds since Jan 1, 1970.
- Setting atime to -1 means that access time should not be set
- by this call.
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="link" type="java.lang.String"/>
- <param name="dirPerm" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a symbolic link to a file or directory.
- @param target The pathname of the destination that the
- link points to.
- @param link The pathname of the link being created.
- @param dirPerm permissions to use when creating parent directories
- @param createParent - if true then missing parent dirs are created
- if false then parent must exist
- @throws IOException
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="getLinkTarget" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Resolve the first symbolic link on the specified path.
- @param path The pathname that needs to be resolved
- @return The pathname after resolving the first symbolic link if any.
- @throws IOException]]>
- </doc>
- </method>
- <method name="updateBlockForPipeline" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a new generation stamp together with an access token for
- a block under construction
-
- This method is called only when a client needs to recover a failed
- pipeline or set up a pipeline for appending to a block.
-
- @param block a block
- @param clientName the name of the client
- @return a located block with a new generation stamp and an access token
- @throws IOException if any error occurs]]>
- </doc>
- </method>
- <method name="updatePipeline"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update a pipeline for a block under construction
-
- @param clientName the name of the client
- @param oldBlock the old block
- @param newBlock the new block containing new generation stamp and length
- @param newNodes datanodes in the pipeline
- @throws IOException if any error occurs]]>
- </doc>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a valid Delegation Token.
-
- @param renewer the designated renewer for the token
- @return Token<DelegationTokenIdentifier>
- @throws IOException]]>
- </doc>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Renew an existing delegation token.
-
- @param token delegation token obtained earlier
- @return the new expiration time
- @throws IOException]]>
- </doc>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Cancel an existing delegation token.
-
- @param token delegation token
- @throws IOException]]>
- </doc>
- </method>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Compared to the previous version the following changes have been introduced:
- (Only the latest change is reflected.
- The log of historical changes can be retrieved from the svn).
- 60: Replace full getListing with iterative getListing.]]>
- </doc>
- </field>
- <field name="GET_STATS_CAPACITY_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_USED_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_REMAINING_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_UNDER_REPLICATED_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_CORRUPT_BLOCKS_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_MISSING_BLOCKS_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[ClientProtocol is used by user code via
- {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate
- with the NameNode. User code can manipulate the directory namespace,
- as well as open/close file streams, etc.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeID -->
- <class name="DatanodeID" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.WritableComparable"/>
- <constructor name="DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Equivalent to DatanodeID("").]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeID copy constructor
-
- @param from]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create DatanodeID
- @param nodeName (hostname:portNumber)
- @param storageID data storage ID
- @param infoPort info server port
- @param ipcPort ipc server port]]>
- </doc>
- </constructor>
- <method name="getName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return hostname:portNumber.]]>
- </doc>
- </method>
- <method name="getStorageID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return data storage ID.]]>
- </doc>
- </method>
- <method name="getInfoPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
- </doc>
- </method>
- <method name="getIpcPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
- </doc>
- </method>
- <method name="setStorageID"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storageID" type="java.lang.String"/>
- <doc>
- <![CDATA[sets the data storage ID.]]>
- </doc>
- </method>
- <method name="getHost" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return the hostname only, without the :portNumber suffix.]]>
- </doc>
- </method>
- <method name="getPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="to" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="updateRegInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <doc>
- <![CDATA[Update fields when a new registration request comes in.
- Note that this does not update storageID.]]>
- </doc>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="that" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <doc>
- <![CDATA[Comparable.
- Basis of compare is the String name (host:portNumber) only.
- @param that
- @return as specified by Comparable.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="name" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="storageID" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="infoPort" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ipcPort" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeID is composed of the data node
- name (hostname:portNumber) and the data storage ID,
- which it currently represents.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeID -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
- <class name="DatanodeInfo" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.net.Node"/>
- <constructor name="DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The raw capacity.]]>
- </doc>
- </method>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The used space by the data node.]]>
- </doc>
- </method>
- <method name="getNonDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The used space by the data node.]]>
- </doc>
- </method>
- <method name="getDfsUsedPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The used space by the data node as percentage of present capacity]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The raw free space.]]>
- </doc>
- </method>
- <method name="getRemainingPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The remaining space as percentage of configured capacity.]]>
- </doc>
- </method>
- <method name="getLastUpdate" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The time when this information was accurate.]]>
- </doc>
- </method>
- <method name="getXceiverCount" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[number of active connections]]>
- </doc>
- </method>
- <method name="setCapacity"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="capacity" type="long"/>
- <doc>
- <![CDATA[Sets raw capacity.]]>
- </doc>
- </method>
- <method name="setRemaining"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="remaining" type="long"/>
- <doc>
- <![CDATA[Sets raw free space.]]>
- </doc>
- </method>
- <method name="setLastUpdate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="lastUpdate" type="long"/>
- <doc>
- <![CDATA[Sets time when this information was accurate.]]>
- </doc>
- </method>
- <method name="setXceiverCount"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="xceiverCount" type="int"/>
- <doc>
- <![CDATA[Sets number of active connections]]>
- </doc>
- </method>
- <method name="getNetworkLocation" return="java.lang.String"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[rack name]]>
- </doc>
- </method>
- <method name="setNetworkLocation"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="location" type="java.lang.String"/>
- <doc>
- <![CDATA[Sets the rack name]]>
- </doc>
- </method>
- <method name="getHostName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setHostName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="host" type="java.lang.String"/>
- </method>
- <method name="getDatanodeReport" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[A formatted string for reporting the status of the DataNode.]]>
- </doc>
- </method>
- <method name="dumpDatanode" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[A formatted string for printing the status of the DataNode.]]>
- </doc>
- </method>
- <method name="startDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Start decommissioning a node.
- old state.]]>
- </doc>
- </method>
- <method name="stopDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stop decommissioning a node.
- old state.]]>
- </doc>
- </method>
- <method name="isDecommissionInProgress" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns true if the node is in the process of being decommissioned]]>
- </doc>
- </method>
- <method name="isDecommissioned" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns true if the node has been decommissioned.]]>
- </doc>
- </method>
- <method name="setDecommissioned"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Sets the admin state to indicate that decommission is complete.]]>
- </doc>
- </method>
- <method name="setAdminState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="newState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
- <doc>
- <![CDATA[Sets the admin state of this node.]]>
- </doc>
- </method>
- <method name="getParent" return="org.apache.hadoop.net.Node"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return this node's parent]]>
- </doc>
- </method>
- <method name="setParent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="parent" type="org.apache.hadoop.net.Node"/>
- </method>
- <method name="getLevel" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return this node's level in the tree.
- E.g. the root of a tree returns 0 and its children return 1]]>
- </doc>
- </method>
- <method name="setLevel"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="level" type="int"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read a DatanodeInfo]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="obj" type="java.lang.Object"/>
- </method>
- <field name="capacity" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="dfsUsed" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="remaining" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="lastUpdate" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="xceiverCount" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="location" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="hostName" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[HostName as supplied by the datanode during registration as its
- name. Namenode uses datanode IP address as the name.]]>
- </doc>
- </field>
- <field name="adminState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeInfo represents the status of a DataNode.
- This object is used for communication in the
- Datanode Protocol and the Client Protocol.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
- <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NORMAL" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DECOMMISSION_INPROGRESS" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DECOMMISSIONED" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
- <interface name="DataTransferProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="DATA_TRANSFER_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Version for data transfers between clients and datanodes
- This should change when the serialization of DatanodeInfo changes, not
- just when the protocol changes. It is not always obvious.]]>
- </doc>
- </field>
- <field name="OP_WRITE_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.WRITE_BLOCK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.WRITE_BLOCK instead.]]>
- </doc>
- </field>
- <field name="OP_READ_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.READ_BLOCK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.READ_BLOCK instead.]]>
- </doc>
- </field>
- <field name="OP_READ_METADATA" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="As of version 15, OP_READ_METADATA is no longer supported.">
- <doc>
- <![CDATA[@deprecated As of version 15, OP_READ_METADATA is no longer supported.]]>
- </doc>
- </field>
- <field name="OP_REPLACE_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.REPLACE_BLOCK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.REPLACE_BLOCK instead.]]>
- </doc>
- </field>
- <field name="OP_COPY_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.COPY_BLOCK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.COPY_BLOCK instead.]]>
- </doc>
- </field>
- <field name="OP_BLOCK_CHECKSUM" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.BLOCK_CHECKSUM instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.BLOCK_CHECKSUM instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_SUCCESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.SUCCESS instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.SUCCESS instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR_CHECKSUM" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR_CHECKSUM instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_CHECKSUM instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR_INVALID" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR_INVALID instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_INVALID instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR_EXISTS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR_EXISTS instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_EXISTS instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR_ACCESS_TOKEN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR_ACCESS_TOKEN instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_ACCESS_TOKEN instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_CHECKSUM_OK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.CHECKSUM_OK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.CHECKSUM_OK instead.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Transfer data to/from datanode using a streaming protocol.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage -->
- <class name="DataTransferProtocol.BlockConstructionStage" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getRecoveryStage" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get the recovery stage of this stage]]>
- </doc>
- </method>
- <field name="PIPELINE_SETUP_APPEND" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The enumerates are always listed as regular stage followed by the
- recovery stage.
- Changing this order will make getRecoveryStage not working.]]>
- </doc>
- </field>
- <field name="PIPELINE_SETUP_APPEND_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DATA_STREAMING" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PIPELINE_SETUP_STREAMING_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PIPELINE_CLOSE" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PIPELINE_CLOSE_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PIPELINE_SETUP_CREATE" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op -->
- <class name="DataTransferProtocol.Op" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read from in]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write to out]]>
- </doc>
- </method>
- <field name="WRITE_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="READ_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="READ_METADATA" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="REPLACE_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COPY_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCK_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="code" type="byte"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The code for this operation.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Operation]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck -->
- <class name="DataTransferProtocol.PipelineAck" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="DataTransferProtocol.PipelineAck"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="DataTransferProtocol.PipelineAck" type="long, org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param seqno sequence number
- @param replies an array of replies]]>
- </doc>
- </constructor>
- <method name="getSeqno" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the sequence number
- @return the sequence number]]>
- </doc>
- </method>
- <method name="getNumOfReplies" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of replies
- @return the number of replies]]>
- </doc>
- </method>
- <method name="getReply" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="i" type="int"/>
- <doc>
 - <![CDATA[Get the ith reply
 - @return the ith reply]]>
- </doc>
- </method>
- <method name="isSuccess" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check if this ack contains error status
- @return true if all statuses are SUCCESS]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Writable interface]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="UNKOWN_SEQNO" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[reply]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver -->
- <class name="DataTransferProtocol.Receiver" extends="java.lang.Object"
- abstract="true"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataTransferProtocol.Receiver"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="readOp" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read an Op. It also checks protocol version.]]>
- </doc>
- </method>
- <method name="processOp"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="protected"
- deprecated="not deprecated">
- <param name="op" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"/>
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Process op by the corresponding method.]]>
- </doc>
- </method>
- <method name="opReadBlock"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <param name="client" type="java.lang.String"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_READ_BLOCK method.
- Read a block.]]>
- </doc>
- </method>
- <method name="opWriteBlock"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="pipelineSize" type="int"/>
- <param name="stage" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"/>
- <param name="newGs" type="long"/>
- <param name="minBytesRcvd" type="long"/>
- <param name="maxBytesRcvd" type="long"/>
- <param name="client" type="java.lang.String"/>
- <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_WRITE_BLOCK method.
- Write a block.]]>
- </doc>
- </method>
- <method name="opReplaceBlock"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="sourceId" type="java.lang.String"/>
- <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_REPLACE_BLOCK method.
 - It is used for balancing purposes; sent to a destination]]>
- </doc>
- </method>
- <method name="opCopyBlock"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_COPY_BLOCK method.
 - It is used for balancing purposes; sent to a proxy source.]]>
- </doc>
- </method>
- <method name="opBlockChecksum"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_BLOCK_CHECKSUM method.
- Get the checksum of a block]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Receiver]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender -->
- <class name="DataTransferProtocol.Sender" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataTransferProtocol.Sender"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="op"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="op" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
 - <![CDATA[Initialize an operation.]]>
- </doc>
- </method>
- <method name="opReadBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="blockOffset" type="long"/>
- <param name="blockLen" type="long"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_READ_BLOCK]]>
- </doc>
- </method>
- <method name="opWriteBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="pipelineSize" type="int"/>
- <param name="stage" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"/>
- <param name="newGs" type="long"/>
- <param name="minBytesRcvd" type="long"/>
- <param name="maxBytesRcvd" type="long"/>
- <param name="client" type="java.lang.String"/>
- <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_WRITE_BLOCK]]>
- </doc>
- </method>
- <method name="opReplaceBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="storageId" type="java.lang.String"/>
- <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_REPLACE_BLOCK]]>
- </doc>
- </method>
- <method name="opCopyBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_COPY_BLOCK]]>
- </doc>
- </method>
- <method name="opBlockChecksum"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blockId" type="long"/>
- <param name="blockGs" type="long"/>
- <param name="accesstoken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_BLOCK_CHECKSUM]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Sender]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status -->
- <class name="DataTransferProtocol.Status" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read from in]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write to out]]>
- </doc>
- </method>
- <method name="writeOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.OutputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write to out]]>
- </doc>
- </method>
- <field name="SUCCESS" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR_INVALID" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR_EXISTS" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR_ACCESS_TOKEN" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CHECKSUM_OK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Status]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DirectoryListing -->
- <class name="DirectoryListing" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="DirectoryListing"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="DirectoryListing" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[], int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[constructor
- @param partialListing a partial listing of a directory
- @param remainingEntries number of entries that are left to be listed]]>
- </doc>
- </constructor>
- <method name="getPartialListing" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the partial listing of file status
- @return the partial listing of file status]]>
- </doc>
- </method>
- <method name="getRemainingEntries" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of remaining entries that are left to be listed
- @return the number of remaining entries that are left to be listed]]>
- </doc>
- </method>
- <method name="hasMore" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check if there are more entries that are left to be listed
- @return true if there are more entries that are left to be listed;
- return false otherwise.]]>
- </doc>
- </method>
- <method name="getLastName" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the last name in this list
- @return the last name in the list if it is not empty; otherwise return null]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[This class defines a partial listing of a directory to support
- iterative directory listing.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DirectoryListing -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DSQuotaExceededException -->
- <class name="DSQuotaExceededException" extends="org.apache.hadoop.hdfs.protocol.QuotaExceededException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DSQuotaExceededException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DSQuotaExceededException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DSQuotaExceededException" type="long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="serialVersionUID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DSQuotaExceededException -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.FSConstants -->
- <interface name="FSConstants" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="MIN_BLOCKS_FOR_WRITE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCK_INVALIDATE_CHUNK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="QUOTA_DONT_SET" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="QUOTA_RESET" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HEARTBEAT_INTERVAL" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCKREPORT_INTERVAL" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCKREPORT_INITIAL_DELAY" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_SOFTLIMIT_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_HARDLIMIT_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_RECOVER_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_PATH_LENGTH" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_PATH_DEPTH" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BUFFER_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SMALL_BUFFER_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_BLOCK_SIZE" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_BYTES_PER_CHECKSUM" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_WRITE_PACKET_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_REPLICATION_FACTOR" type="short"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_FILE_BUFFER_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SIZE_OF_INTEGER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HDFS_URI_SCHEME" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[URI Scheme for hdfs://namenode/ URIs.]]>
- </doc>
- </field>
- <field name="LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Some handy constants]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.FSConstants -->
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
- <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="ALL" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LIVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEAD" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
- <class name="FSConstants.SafeModeAction" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="SAFEMODE_LEAVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SAFEMODE_ENTER" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SAFEMODE_GET" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
- <class name="FSConstants.UpgradeAction" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="GET_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DETAILED_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FORCE_PROCEED" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Distributed upgrade actions:
-
- 1. Get upgrade status.
- 2. Get detailed upgrade status.
- 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
- <!-- start class org.apache.hadoop.hdfs.protocol.HdfsFileStatus -->
- <class name="HdfsFileStatus" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="HdfsFileStatus"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="HdfsFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, byte[], byte[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param length the number of bytes the file has
- @param isdir if the path is a directory
- @param block_replication the replication factor
- @param blocksize the block size
- @param modification_time modification time
- @param access_time access time
- @param permission permission
- @param owner the owner of the path
- @param group the group of the path
- @param path the local name in java UTF8 encoding the same as that in-memory]]>
- </doc>
- </constructor>
- <method name="getLen" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the length of this file, in bytes.
- @return the length of this file, in bytes.]]>
- </doc>
- </method>
- <method name="isDir" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is this a directory?
- @return true if this is a directory]]>
- </doc>
- </method>
- <method name="isSymlink" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is this a symbolic link?
- @return true if this is a symbolic link]]>
- </doc>
- </method>
- <method name="getBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the block size of the file.
- @return the number of bytes]]>
- </doc>
- </method>
- <method name="getReplication" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the replication factor of a file.
- @return the replication factor of a file.]]>
- </doc>
- </method>
- <method name="getModificationTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the modification time of the file.
- @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
- </doc>
- </method>
- <method name="getAccessTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the access time of the file.
- @return the access time of file in milliseconds since January 1, 1970 UTC.]]>
- </doc>
- </method>
- <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get FsPermission associated with the file.
- @return permssion]]>
- </doc>
- </method>
- <method name="getOwner" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the owner of the file.
- @return owner of the file]]>
- </doc>
- </method>
- <method name="getGroup" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the group associated with the file.
- @return group for the file.]]>
- </doc>
- </method>
- <method name="isEmptyLocalName" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check if the local name is empty
- @return true if the name is empty]]>
- </doc>
- </method>
- <method name="getLocalName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the string representation of the local name
- @return the local name in string]]>
- </doc>
- </method>
- <method name="getLocalNameInBytes" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the Java UTF8 representation of the local name
- @return the local name in java UTF8]]>
- </doc>
- </method>
- <method name="getFullName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <param name="parent" type="java.lang.String"/>
- <doc>
- <![CDATA[Get the string representation of the full path name
- @param parent the parent path
- @return the full path in string]]>
- </doc>
- </method>
- <method name="getFullPath" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <param name="parent" type="org.apache.hadoop.fs.Path"/>
- <doc>
- <![CDATA[Get the full path
- @param parent the parent path
- @return the full path]]>
- </doc>
- </method>
- <method name="getSymlink" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the string representation of the symlink.
- @return the symlink as a string.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="EMPTY_NAME" type="byte[]"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Interface that represents the over the wire information for a file.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.HdfsFileStatus -->
- <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
- <class name="LocatedBlock" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="LocatedBlock"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getAccessToken" return="org.apache.hadoop.hdfs.security.BlockAccessToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setAccessToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- </method>
- <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getLocations" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStartOffset" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isCorrupt" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read LocatedBlock from in.]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A LocatedBlock is a pair of Block, DatanodeInfo[]
- objects. It tells where to find a Block.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
- <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
- <class name="LocatedBlocks" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="LocatedBlocks" type="long, boolean, java.util.List, org.apache.hadoop.hdfs.protocol.LocatedBlock, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[public Constructor]]>
- </doc>
- </constructor>
- <method name="getLocatedBlocks" return="java.util.List"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get located blocks.]]>
- </doc>
- </method>
- <method name="getLastLocatedBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the last located block.]]>
- </doc>
- </method>
- <method name="isLastBlockComplete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is the last block completed?]]>
- </doc>
- </method>
- <method name="get" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[Get located block.]]>
- </doc>
- </method>
- <method name="locatedBlockCount" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get number of located blocks.]]>
- </doc>
- </method>
- <method name="getFileLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isUnderConstruction" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return ture if file was under construction when
- this LocatedBlocks was constructed, false otherwise.]]>
- </doc>
- </method>
- <method name="findBlock" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="offset" type="long"/>
- <doc>
- <![CDATA[Find block containing specified offset.
-
- @return block if found, or null otherwise.]]>
- </doc>
- </method>
- <method name="insertRange"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockIdx" type="int"/>
- <param name="newBlocks" type="java.util.List"/>
- </method>
- <method name="getInsertIndex" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="binSearchResult" type="int"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Collection of blocks with their locations and the file length.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
- <!-- start class org.apache.hadoop.hdfs.protocol.NSQuotaExceededException -->
- <class name="NSQuotaExceededException" extends="org.apache.hadoop.hdfs.protocol.QuotaExceededException"
- abstract="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <constructor name="NSQuotaExceededException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NSQuotaExceededException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NSQuotaExceededException" type="long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="serialVersionUID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.NSQuotaExceededException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
- <class name="QuotaExceededException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="QuotaExceededException"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <constructor name="QuotaExceededException" type="java.lang.String"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <constructor name="QuotaExceededException" type="long, long"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="setPathName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- </method>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="serialVersionUID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="pathName" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="quota" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="count" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This exception is thrown when modification to HDFS results in violation
- of a directory quota. A directory quota might be namespace quota (limit
- on number of files and directories) or a diskspace quota (limit on space
- taken by all the file under the directory tree). <br> <br>
-
- The message for the exception specifies the directory where the quota
- was violated and actual quotas. Specific message is generated in the
- corresponding Exception class:
- DSQuotaExceededException or
- NSQuotaExceededException]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.RecoveryInProgressException -->
- <class name="RecoveryInProgressException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="RecoveryInProgressException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Exception indicating that a replica is already being recovery.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.RecoveryInProgressException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException -->
- <class name="UnregisteredNodeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UnregisteredNodeException" type="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UnregisteredNodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID, org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The exception is thrown if a different data-node claims the same
- storage id as the existing one.
-
- @param nodeID unregistered data-node
- @param storedNode data-node stored in the system with this storage id]]>
- </doc>
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when a node that has not previously
- registered is trying to access the name node.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.UnresolvedPathException -->
- <class name="UnresolvedPathException" extends="org.apache.hadoop.fs.UnresolvedLinkException"
- abstract="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <constructor name="UnresolvedPathException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Used by RemoteException to instantiate an UnresolvedPathException.]]>
- </doc>
- </constructor>
- <constructor name="UnresolvedPathException" type="java.lang.String, java.lang.String, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getUnresolvedPath" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getResolvedPath" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Thrown when a symbolic link is encountered in a path.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.UnresolvedPathException -->
- </package>
- <package name="org.apache.hadoop.hdfs.security">
- <!-- start class org.apache.hadoop.hdfs.security.AccessTokenHandler -->
- <class name="AccessTokenHandler" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="AccessTokenHandler" type="boolean, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Constructor
-
- @param isMaster
- @param keyUpdateInterval
- @param tokenLifetime
- @throws IOException]]>
- </doc>
- </constructor>
- <method name="exportKeys" return="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Export access keys, only to be used in master mode]]>
- </doc>
- </method>
- <method name="setKeys"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="exportedKeys" type="org.apache.hadoop.hdfs.security.ExportedAccessKeys"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set access keys, only to be used in slave mode]]>
- </doc>
- </method>
- <method name="updateKeys"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update access keys, only to be used in master mode]]>
- </doc>
- </method>
- <method name="generateToken" return="org.apache.hadoop.hdfs.security.BlockAccessToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockID" type="long"/>
- <param name="modes" type="java.util.EnumSet"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Generate an access token for current user]]>
- </doc>
- </method>
- <method name="generateToken" return="org.apache.hadoop.hdfs.security.BlockAccessToken"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="userID" type="java.lang.String"/>
- <param name="blockID" type="long"/>
- <param name="modes" type="java.util.EnumSet"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Generate an access token for a specified user]]>
- </doc>
- </method>
- <method name="checkAccess" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <param name="userID" type="java.lang.String"/>
- <param name="blockID" type="long"/>
- <param name="mode" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check if access should be allowed. userID is not checked if null]]>
- </doc>
- </method>
- <doc>
- <![CDATA[AccessTokenHandler can be instantiated in 2 modes, master mode and slave
- mode. Master can generate new access keys and export access keys to slaves,
- while slaves can only import and use access keys received from master. Both
- master and slave can generate and verify access tokens. Typically, master
- mode is used by NN and slave mode is used by DN.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.AccessTokenHandler -->
- <!-- start class org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode -->
- <class name="AccessTokenHandler.AccessMode" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="READ" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WRITE" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COPY" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="REPLACE" type="org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.AccessTokenHandler.AccessMode -->
- <!-- start class org.apache.hadoop.hdfs.security.BlockAccessKey -->
- <class name="BlockAccessKey" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="BlockAccessKey"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockAccessKey" type="long, org.apache.hadoop.io.Text, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getKeyID" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getKey" return="org.apache.hadoop.io.Text"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getExpiryDate" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getMac" return="javax.crypto.Mac"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setMac"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="mac" type="javax.crypto.Mac"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="obj" type="java.lang.Object"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Key used for generating and verifying access tokens]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.BlockAccessKey -->
- <!-- start class org.apache.hadoop.hdfs.security.BlockAccessToken -->
- <class name="BlockAccessToken" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="BlockAccessToken"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockAccessToken" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getTokenID" return="org.apache.hadoop.io.Text"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTokenAuthenticator" return="org.apache.hadoop.io.Text"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="obj" type="java.lang.Object"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="DUMMY_TOKEN" type="org.apache.hadoop.hdfs.security.BlockAccessToken"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.BlockAccessToken -->
- <!-- start class org.apache.hadoop.hdfs.security.ExportedAccessKeys -->
- <class name="ExportedAccessKeys" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="ExportedAccessKeys"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="isAccessTokenEnabled" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getKeyUpdateInterval" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTokenLifetime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCurrentKey" return="org.apache.hadoop.hdfs.security.BlockAccessKey"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getAllKeys" return="org.apache.hadoop.hdfs.security.BlockAccessKey[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="obj" type="java.lang.Object"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="DUMMY_KEYS" type="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Object for passing access keys]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.ExportedAccessKeys -->
- <!-- start class org.apache.hadoop.hdfs.security.InvalidAccessTokenException -->
- <class name="InvalidAccessTokenException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="InvalidAccessTokenException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="InvalidAccessTokenException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Access token verification failed.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.InvalidAccessTokenException -->
- </package>
- <package name="org.apache.hadoop.hdfs.security.token.delegation">
- <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier -->
- <class name="DelegationTokenIdentifier" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenIdentifier"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create an empty delegation token identifier for reading into.]]>
- </doc>
- </constructor>
- <constructor name="DelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a new delegation token identifier
- @param owner the effective username of the token owner
- @param renewer the username of the renewer
- @param realUser the real username of the token owner]]>
- </doc>
- </constructor>
- <method name="getKind" return="org.apache.hadoop.io.Text"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="HDFS_DELEGATION_KIND" type="org.apache.hadoop.io.Text"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[A delegation token identifier that is specific to HDFS.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier -->
- <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager -->
- <class name="DelegationTokenSecretManager" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenSecretManager" type="long, long, long, long, org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a secret manager
- @param delegationKeyUpdateInterval the number of seconds for rolling new
- secret keys.
- @param delegationTokenMaxLifetime the maximum lifetime of the delegation
- tokens
- @param delegationTokenRenewInterval how often the tokens must be renewed
- @param delegationTokenRemoverScanInterval how often the tokens are scanned
- for expired tokens]]>
- </doc>
- </constructor>
- <method name="createIdentifier" return="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTokenExpiryTime" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dtId" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns expiry time of a token given its identifier.
-
- @param dtId DelegationTokenIdentifier of a token
- @return Expiry time of the token
- @throws IOException]]>
- </doc>
- </method>
- <method name="loadSecretManagerState"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Load SecretManager state from fsimage.
-
- @param in input stream to read fsimage
- @throws IOException]]>
- </doc>
- </method>
- <method name="saveSecretManagerState"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Store the current state of the SecretManager for persistence
-
- @param out Output stream for writing into fsimage.
- @throws IOException]]>
- </doc>
- </method>
- <method name="addPersistedDelegationToken"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
- <param name="expiryTime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This method is intended to be used only while reading edit logs.
-
- @param identifier DelegationTokenIdentifier read from the edit logs or
- fsimage
-
- @param expiryTime token expiry time
- @throws IOException]]>
- </doc>
- </method>
- <method name="updatePersistedMasterKey"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Add a MasterKey to the list of keys.
-
- @param key DelegationKey
- @throws IOException]]>
- </doc>
- </method>
- <method name="updatePersistedTokenRenewal"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
- <param name="expiryTime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update the token cache with renewal record in edit logs.
-
- @param identifier DelegationTokenIdentifier of the renewed token
- @param expiryTime
- @throws IOException]]>
- </doc>
- </method>
- <method name="updatePersistedTokenCancellation"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update the token cache with the cancel record in edit logs
-
- @param identifier DelegationTokenIdentifier of the canceled token
- @throws IOException]]>
- </doc>
- </method>
- <method name="getNumberOfKeys" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the number of delegation keys currently stored.
- @return number of delegation keys]]>
- </doc>
- </method>
- <method name="logUpdateMasterKey"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call namesystem to update editlogs for new master key.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A HDFS specific delegation token secret manager.
- The secret manager is responsible for generating and accepting the password
- for each token.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager -->
- <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector -->
- <class name="DelegationTokenSelector" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenSelector"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[A delegation token that is specialized for HDFS]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.balancer">
- <!-- start class org.apache.hadoop.hdfs.server.balancer.Balancer -->
- <class name="Balancer" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.util.Tool"/>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <doc>
- <![CDATA[Run a balancer
- @param args]]>
- </doc>
- </method>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[main method of Balancer
- @param args arguments to a Balancer
- @exception Exception if any exception occurs during datanode balancing]]>
- </doc>
- </method>
- <method name="getConf" return="org.apache.hadoop.conf.Configuration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[return this balancer's configuration]]>
- </doc>
- </method>
- <method name="setConf"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <doc>
- <![CDATA[set this balancer's configuration]]>
- </doc>
- </method>
- <field name="MAX_NUM_CONCURRENT_MOVES" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The maximum number of concurrent blocks moves for
- balancing purpose at a datanode]]>
- </doc>
- </field>
- <field name="SUCCESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ALREADY_RUNNING" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NO_MOVE_BLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NO_MOVE_PROGRESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="IO_EXCEPTION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ILLEGAL_ARGS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
- when some datanodes become full or when new empty nodes join the cluster.
- The tool is deployed as an application program that can be run by the
- cluster administrator on a live HDFS cluster while applications
- are adding and deleting files.
-
- <p>SYNOPSIS
- <pre>
- To start:
- bin/start-balancer.sh [-threshold <threshold>]
- Example: bin/start-balancer.sh
- start the balancer with a default threshold of 10%
- bin/start-balancer.sh -threshold 5
- start the balancer with a threshold of 5%
- To stop:
- bin/stop-balancer.sh
- </pre>
-
- <p>DESCRIPTION
- <p>The threshold parameter is a fraction in the range of (0%, 100%) with a
- default value of 10%. The threshold sets a target for whether the cluster
- is balanced. A cluster is balanced if for each datanode, the utilization
- of the node (ratio of used space at the node to total capacity of the node)
- differs from the utilization of the cluster (ratio of used space in the cluster
- to total capacity of the cluster) by no more than the threshold value.
- The smaller the threshold, the more balanced a cluster will become.
- It takes more time to run the balancer for small threshold values.
- Also for a very small threshold the cluster may not be able to reach the
- balanced state when applications write and delete files concurrently.
-
- <p>The tool moves blocks from highly utilized datanodes to poorly
- utilized datanodes iteratively. In each iteration a datanode moves or
- receives no more than the lesser of 10G bytes or the threshold fraction
- of its capacity. Each iteration runs no more than 20 minutes.
- At the end of each iteration, the balancer obtains updated datanodes
- information from the namenode.
-
- <p>A system property that limits the balancer's use of bandwidth is
- defined in the default configuration file:
- <pre>
- <property>
- <name>dfs.balance.bandwidthPerSec</name>
- <value>1048576</value>
- <description> Specifies the maximum bandwidth that each datanode
- can utilize for the balancing purpose in term of the number of bytes
- per second. </description>
- </property>
- </pre>
-
- <p>This property determines the maximum speed at which a block will be
- moved from one datanode to another. The default value is 1MB/s. The higher
- the bandwidth, the faster a cluster can reach the balanced state,
- but with greater competition with application processes. If an
- administrator changes the value of this property in the configuration
- file, the change is observed when HDFS is next restarted.
-
- <p>MONITORING BALANCER PROGRESS
- <p>After the balancer is started, an output file name where the balancer
- progress will be recorded is printed on the screen. The administrator
- can monitor the running of the balancer by reading the output file.
- The output shows the balancer's status iteration by iteration. In each
- iteration it prints the starting time, the iteration number, the total
- number of bytes that have been moved in the previous iterations,
- the total number of bytes that are left to move in order for the cluster
- to be balanced, and the number of bytes that are being moved in this
- iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
- To Move" is decreasing.
-
- <p>Running multiple instances of the balancer in an HDFS cluster is
- prohibited by the tool.
-
- <p>The balancer automatically exits when any of the following five
- conditions is satisfied:
- <ol>
- <li>The cluster is balanced;
- <li>No block can be moved;
- <li>No block has been moved for five consecutive iterations;
- <li>An IOException occurs while communicating with the namenode;
- <li>Another balancer is running.
- </ol>
-
- <p>Upon exit, a balancer returns an exit code and prints one of the
- following messages to the output file in corresponding to the above exit
- reasons:
- <ol>
- <li>The cluster is balanced. Exiting
- <li>No block can be moved. Exiting...
- <li>No block has been moved for five iterations. Exiting...
- <li>Received an IO exception: failure reason. Exiting...
- <li>Another balancer is running. Exiting...
- </ol>
-
- <p>The administrator can interrupt the execution of the balancer at any
- time by running the command "stop-balancer.sh" on the machine where the
- balancer is running.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.balancer.Balancer -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.common">
- <!-- start class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
- <class name="GenerationStamp" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Comparable"/>
- <constructor name="GenerationStamp"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a new instance, initialized to FIRST_VALID_STAMP.]]>
- </doc>
- </constructor>
- <method name="getStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the current generation stamp]]>
- </doc>
- </method>
- <method name="setStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- <doc>
- <![CDATA[Sets the current generation stamp]]>
- </doc>
- </method>
- <method name="nextStamp" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[First increments the counter and then returns the stamp]]>
- </doc>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="that" type="org.apache.hadoop.hdfs.server.common.GenerationStamp"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="FIRST_VALID_STAMP" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The first valid generation stamp.]]>
- </doc>
- </field>
- <field name="GRANDFATHER_GENERATION_STAMP" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Generation stamp of blocks that pre-date the introduction
- of a generation stamp.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[A GenerationStamp is a Hadoop FS primitive, identified by a long.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
- <!-- start interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
- <interface name="HdfsConstants" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="READ_TIMEOUT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="READ_TIMEOUT_EXTENSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WRITE_TIMEOUT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WRITE_TIMEOUT_EXTENSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Some handy internal HDFS constants]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState -->
- <class name="HdfsConstants.BlockUCState" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="COMPLETE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Block construction completed.<br>
- The block has at least one {@link ReplicaState#FINALIZED} replica,
- and is not going to be modified.]]>
- </doc>
- </field>
- <field name="UNDER_CONSTRUCTION" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The block is under construction.<br>
- It has been recently allocated for write or append.]]>
- </doc>
- </field>
- <field name="UNDER_RECOVERY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The block is under recovery.<br>
- When a file lease expires its last block may not be {@link #COMPLETE}
- and needs to go through a recovery procedure,
- which synchronizes the existing replicas contents.]]>
- </doc>
- </field>
- <field name="COMMITTED" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The block is committed.<br>
- The client reported that all bytes are written to data-nodes
- with the given generation stamp and block length, but no
- {@link ReplicaState#FINALIZED}
- replicas have yet been reported by data-nodes themselves.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[States, which a block can go through while it is under construction.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole -->
- <class name="HdfsConstants.NamenodeRole" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="ACTIVE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BACKUP" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="STANDBY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Defines the NameNode role.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
- <class name="HdfsConstants.NodeType" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NAME_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DATA_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Type of the node]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState -->
- <class name="HdfsConstants.ReplicaState" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getValue" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="v" type="int"/>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read from in]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write to out]]>
- </doc>
- </method>
- <field name="FINALIZED" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Replica is finalized. The state when replica is not modified.]]>
- </doc>
- </field>
- <field name="RBW" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Replica is being written to.]]>
- </doc>
- </field>
- <field name="RWR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Replica is waiting to be recovered.]]>
- </doc>
- </field>
- <field name="RUR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Replica is under recovery.]]>
- </doc>
- </field>
- <field name="TEMPORARY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Temporary replica: created for replication and relocation only.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Block replica states, which it can go through while being constructed.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
- <class name="HdfsConstants.StartupOption" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toNodeRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="FORMAT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="REGULAR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BACKUP" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UPGRADE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ROLLBACK" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="IMPORT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Startup options]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
- <!-- start class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
- <class name="InconsistentFSStateException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String, java.lang.Throwable"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception is thrown when file system state is inconsistent
- and is not recoverable.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
- <!-- start class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
- <class name="IncorrectVersionException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="IncorrectVersionException" type="int, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="IncorrectVersionException" type="int, java.lang.String, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception is thrown when external version does not match
- current version of the application.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
- <!-- start class org.apache.hadoop.hdfs.server.common.JspHelper -->
- <class name="JspHelper" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="streamBlockInAscii"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="addr" type="java.net.InetSocketAddress"/>
- <param name="blockId" type="long"/>
- <param name="accessToken" type="org.apache.hadoop.hdfs.security.BlockAccessToken"/>
- <param name="genStamp" type="long"/>
- <param name="blockSize" type="long"/>
- <param name="offsetIntoBlock" type="long"/>
- <param name="chunkSizeToView" type="long"/>
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableHeader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableRow"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="columns" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableRow"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="columns" type="java.lang.String[]"/>
- <param name="row" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableFooter"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="sortNodeList"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodes" type="java.util.ArrayList"/>
- <param name="field" type="java.lang.String"/>
- <param name="order" type="java.lang.String"/>
- </method>
- <method name="printPathWithLinks"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dir" type="java.lang.String"/>
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="namenodeInfoPort" type="int"/>
- <param name="tokenString" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="printGotoForm"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="namenodeInfoPort" type="int"/>
- <param name="tokenString" type="java.lang.String"/>
- <param name="file" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createTitle"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="req" type="javax.servlet.http.HttpServletRequest"/>
- <param name="file" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="string2ChunkSizeToView" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="s" type="java.lang.String"/>
- <param name="defaultValue" type="int"/>
- <doc>
- <![CDATA[Convert a String to chunk-size-to-view.]]>
- </doc>
- </method>
- <method name="getVersionTable" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return a table containing version information.]]>
- </doc>
- </method>
- <method name="validatePath" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="java.lang.String"/>
- <doc>
- <![CDATA[Validate filename.
- @return null if the filename is invalid.
- Otherwise, return the validated filename.]]>
- </doc>
- </method>
- <method name="validateLong" return="java.lang.Long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="value" type="java.lang.String"/>
- <doc>
- <![CDATA[Validate a long value.
- @return null if the value is invalid.
- Otherwise, return the validated Long object.]]>
- </doc>
- </method>
- <method name="validateURL" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="value" type="java.lang.String"/>
- <doc>
- <![CDATA[Validate a URL.
- @return null if the value is invalid.
- Otherwise, return the validated URL String.]]>
- </doc>
- </method>
- <method name="getDefaultWebUser" return="org.apache.hadoop.security.UserGroupInformation"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[If security is turned off, what is the default web user?
- @param conf the configuration to look in
- @return the remote user that was configuration]]>
- </doc>
- </method>
- <method name="getUGI" return="org.apache.hadoop.security.UserGroupInformation"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get {@link UserGroupInformation} and possibly the delegation token out of
- the request.
- @param request the http request
- @return a new user from the request
- @throws AccessControlException if the request has no token]]>
- </doc>
- </method>
- <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DELEGATION_PARAMETER_NAME" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SET_DELEGATION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.JspHelper -->
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage -->
- <class name="Storage" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create empty storage info of the specified type]]>
- </doc>
- </constructor>
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, int, long"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="dirIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return default iterator
- This iterator returns all entries in storageDirs]]>
- </doc>
- </method>
- <method name="dirIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dirType" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
- <doc>
- <![CDATA[Return iterator based on Storage Directory Type
- This iterator selects entries in storageDirs of type dirType and returns
- them via the Iterator]]>
- </doc>
- </method>
- <method name="listStorageDirectories" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[generate storage list (debug line)]]>
- </doc>
- </method>
- <method name="getNumStorageDirs" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStorageDir" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="idx" type="int"/>
- </method>
- <method name="addStorageDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- </method>
- <method name="isConversionNeeded" return="boolean"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="checkVersionUpgradable"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="oldVersion" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Checks if the upgrade from the given old version is supported. If
- no upgrade is supported, it throws IncorrectVersionException.
-
- @param oldVersion]]>
- </doc>
- </method>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get common storage fields.
- Should be overloaded if additional fields need to be get.
-
- @param props
- @throws IOException]]>
- </doc>
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set common storage fields.
- Should be overloaded if additional fields need to be set.
-
- @param props
- @throws IOException]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="java.io.File"/>
- <param name="to" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="deleteDir"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="dir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="writeAll"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write all data storage files.
- @throws IOException]]>
- </doc>
- </method>
- <method name="unlockAll"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unlock all storage directories.
- @throws IOException]]>
- </doc>
- </method>
- <method name="isLockSupported" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check whether underlying file system supports file locking.
-
- @return <code>true</code> if exclusive locks are supported or
- <code>false</code> otherwise.
- @throws IOException
- @see StorageDirectory#lock()]]>
- </doc>
- </method>
- <method name="getBuildVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
- </method>
- <method name="corruptPreUpgradeStorage"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="rootDir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="writeCorruptedData"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="file" type="java.io.RandomAccessFile"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LAST_PRE_UPGRADE_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="LAST_UPGRADABLE_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LAST_UPGRADABLE_HADOOP_VERSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="PRE_GENERATIONSTAMP_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PRE_RBW_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="STORAGE_FILE_VERSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="STORAGE_DIR_CURRENT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="storageDirs" type="java.util.List"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Storage information file.
- <p>
- Local storage information is stored in a separate file VERSION.
- It contains type of the node,
- the storage layout version, the namespace id, and
- the fs state creation time.
- <p>
- Local storage can reside in multiple directories.
- Each directory should contain the same VERSION file as the others.
- During startup Hadoop servers (name-node and data-nodes) read their local
- storage information from them.
- <p>
- The servers hold a lock for each storage directory while they run so that
- other nodes were not able to startup sharing the same storage.
- The locks are released when the servers stop (normally or abnormally).]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage -->
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
- <class name="Storage.StorageDirectory" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="Storage.StorageDirectory" type="java.io.File"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Storage.StorageDirectory" type="java.io.File, org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getRoot" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get root directory of this storage]]>
- </doc>
- </method>
- <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get storage directory type]]>
- </doc>
- </method>
- <method name="read"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read version file.
-
- @throws IOException if file cannot be read or contains inconsistent data]]>
- </doc>
- </method>
- <method name="read"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write version file.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="to" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="clearDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Clear and re-create storage directory.
- <p>
- Removes contents of the current directory and creates an empty directory.
-
- This does not fully format storage directory.
- It cannot write the version file since it should be written last after
- all other storage type dependent files are written.
- Derived storage is responsible for setting specific storage values and
- writing the version file to disk.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCurrentDir" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Directory {@code current} contains latest files defining
- the file system meta-data.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getVersionFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[File {@code VERSION} contains the following fields:
- <ol>
- <li>node type</li>
- <li>layout version</li>
- <li>namespaceID</li>
- <li>fs state creation time</li>
- <li>other fields specific for this node type</li>
- </ol>
- The version file is always written last during storage directory updates.
- The existence of the version file indicates that all other files have
- been successfully written in the storage directory, the storage is valid
- and does not need to be recovered.
-
- @return the version file path]]>
- </doc>
- </method>
- <method name="getPreviousVersionFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[File {@code VERSION} from the {@code previous} directory.
-
- @return the previous version file path]]>
- </doc>
- </method>
- <method name="getPreviousDir" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Directory {@code previous} contains the previous file system state,
- which the system can be rolled back to.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getPreviousTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code previous.tmp} is a transient directory, which holds
- current file system state while the new state is saved into the new
- {@code current} during upgrade.
- If the saving succeeds {@code previous.tmp} will be moved to
- {@code previous}, otherwise it will be renamed back to
- {@code current} by the recovery procedure during startup.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getRemovedTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code removed.tmp} is a transient directory, which holds
- current file system state while the previous state is moved into
- {@code current} during rollback.
- If the moving succeeds {@code removed.tmp} will be removed,
- otherwise it will be renamed back to
- {@code current} by the recovery procedure during startup.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getFinalizedTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code finalized.tmp} is a transient directory, which holds
- the {@code previous} file system state while it is being removed
- in response to the finalize request.
- Finalize operation will remove {@code finalized.tmp} when completed,
- otherwise the removal will resume upon the system startup.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getLastCheckpointTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code lastcheckpoint.tmp} is a transient directory, which holds
- current file system state while the new state is saved into the new
- {@code current} during regular namespace updates.
- If the saving succeeds {@code lastcheckpoint.tmp} will be moved to
- {@code previous.checkpoint}, otherwise it will be renamed back to
- {@code current} by the recovery procedure during startup.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getPreviousCheckpoint" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code previous.checkpoint} is a directory, which holds the previous
- (before the last save) state of the storage directory.
 - The directory is created as a reference only, it does not play a role
- in state recovery procedures, and is recycled automatically,
- but it may be useful for manual recovery of a stale state of the system.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="analyzeStorage" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="startOpt" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check consistency of the storage directory
-
- @param startOpt a startup option.
-
- @return state {@link StorageState} of the storage directory
- @throws InconsistentFSStateException if directory state is not
- consistent and cannot be recovered.
- @throws IOException]]>
- </doc>
- </method>
- <method name="doRecover"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="curState" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete or recover storage state from previously failed transition.
-
- @param curState specifies what/how the state should be recovered
- @throws IOException]]>
- </doc>
- </method>
- <method name="lock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Lock storage to provide exclusive access.
-
- <p> Locking is not supported by all file systems.
- E.g., NFS does not consistently support exclusive locks.
-
 - <p> If locking is supported we guarantee exclusive access to the
- storage directory. Otherwise, no guarantee is given.
-
- @throws IOException if locking fails]]>
- </doc>
- </method>
- <method name="unlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unlock storage.
-
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[One of the storage directories.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
- <!-- start interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
- <interface name="Storage.StorageDirType" abstract="true"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isOfType" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
- </method>
- <doc>
- <![CDATA[An interface to denote storage directory type
- Implementations can define a type for storage directory by implementing
- this interface.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
- <class name="Storage.StorageState" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NON_EXISTENT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NOT_FORMATTED" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_FINALIZE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NORMAL" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
- <!-- start class org.apache.hadoop.hdfs.server.common.StorageInfo -->
- <class name="StorageInfo" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="StorageInfo" type="int, int, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="StorageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getLayoutVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Layout version of the storage data.]]>
- </doc>
- </method>
- <method name="getNamespaceID" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Namespace id of the file system.<p>
- Assigned to the file system at formatting and never changes after that.
- Shared by all file system components.]]>
- </doc>
- </method>
- <method name="getCTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Creation time of the file system state.<p>
- Modified during upgrades.]]>
- </doc>
- </method>
- <method name="setStorageInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="layoutVersion" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namespaceID" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="cTime" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Common class for storage information.
-
- TODO namespaceID should be long and computed as hash(address + port)]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.StorageInfo -->
- <!-- start interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
- <interface name="Upgradeable" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Comparable"/>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the layout version of the upgrade object.
- @return layout version]]>
- </doc>
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the type of the software component, which this object is upgrading.
- @return type]]>
- </doc>
- </method>
- <method name="getDescription" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Description of the upgrade object for displaying.
- @return description]]>
- </doc>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Upgrade status determines a percentage of the work done out of the total
- amount required by the upgrade.
-
- 100% means that the upgrade is completed.
- Any value < 100 means it is not complete.
-
- The return value should provide at least 2 values, e.g. 0 and 100.
- @return integer value in the range [0, 100].]]>
- </doc>
- </method>
- <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Prepare for the upgrade.
- E.g. initialize upgrade data structures and set status to 0.
-
- Returns an upgrade command that is used for broadcasting to other cluster
- components.
- E.g. name-node informs data-nodes that they must perform a distributed upgrade.
-
- @return an UpgradeCommand for broadcasting.
- @throws IOException]]>
- </doc>
- </method>
- <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete upgrade.
- E.g. cleanup upgrade data structures or write metadata to disk.
-
- Returns an upgrade command that is used for broadcasting to other cluster
- components.
- E.g. data-nodes inform the name-node that they completed the upgrade
- while other data-nodes are still upgrading.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get status report for the upgrade.
-
- @param details true if upgradeStatus details need to be included,
- false otherwise
- @return {@link UpgradeStatusReport}
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Common interface for distributed upgrade objects.
-
- Each upgrade object corresponds to a layout version,
- which is the latest version that should be upgraded using this object.
- That is all components whose layout version is greater or equal to the
- one returned by {@link #getVersion()} must be upgraded with this object.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
- <class name="UpgradeManager" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeManager"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBroadcastCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeState" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeVersion" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setUpgradeState"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="uState" type="boolean"/>
- <param name="uVersion" type="int"/>
- </method>
- <method name="getDistributedUpgrades" return="java.util.SortedSet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="initializeUpgrade" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="isUpgradeCompleted" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="startUpgrade" return="boolean"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="completeUpgrade"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="currentUpgrades" type="java.util.SortedSet"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeState" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeVersion" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="broadcastCommand" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Generic upgrade manager.
-
 - {@link #broadcastCommand} is the command that should be broadcast to other cluster components.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
- <class name="UpgradeObject" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
- <constructor name="UpgradeObject"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDescription" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="status" type="short"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Abstract upgrade object.
-
- Contains default implementation of common methods of {@link Upgradeable}
- interface.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
- <class name="UpgradeObjectCollection" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeObjectCollection"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDistributedUpgrades" return="java.util.SortedSet"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="versionFrom" type="int"/>
- <param name="type" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Collection of upgrade objects.
- Upgrade objects should be registered here before they can be used.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
- <class name="UpgradeStatusReport" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="UpgradeStatusReport"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UpgradeStatusReport" type="int, short, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the layout version of the currently running upgrade.
- @return layout version]]>
- </doc>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get upgrade upgradeStatus as a percentage of the total upgrade done.
-
- @see Upgradeable#getUpgradeStatus()]]>
- </doc>
- </method>
- <method name="isFinalized" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is current upgrade finalized.
- @return true if finalized or false otherwise.]]>
- </doc>
- </method>
- <method name="getStatusText" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <doc>
- <![CDATA[Get upgradeStatus data as a text for reporting.
- Should be overloaded for a particular upgrade specific upgradeStatus data.
-
- @param details true if upgradeStatus details need to be included,
- false otherwise
- @return text]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Print basic upgradeStatus details.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="version" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeStatus" type="short"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="finalized" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Base upgrade upgradeStatus class.
- Overload this class if specific status fields need to be reported.
-
- Describes status of current upgrade.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
- <!-- start class org.apache.hadoop.hdfs.server.common.Util -->
- <class name="Util" extends="java.lang.Object"
- abstract="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <constructor name="Util"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="now" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Current system time.
- @return current time in msec.]]>
- </doc>
- </method>
- <method name="stringAsURI" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="s" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Interprets the passed string as a URI. In case of error it
- assumes the specified string is a file.
- @param s the string to interpret
- @return the resulting URI
- @throws IOException]]>
- </doc>
- </method>
- <method name="fileAsURI" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Converts the passed File to a URI.
- @param f the file to convert
- @return the resulting URI
- @throws IOException]]>
- </doc>
- </method>
- <method name="stringCollectionAsURIs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="names" type="java.util.Collection"/>
- <doc>
- <![CDATA[Converts a collection of strings into a collection of URIs.
- @param names collection of strings to convert to URIs
- @return collection of URIs]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Util -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.datanode">
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DataNode -->
- <class name="DataNode" extends="org.apache.hadoop.conf.Configured"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="java.lang.Runnable"/>
- <method name="createSocketAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
- </doc>
- </method>
- <method name="newSocket" return="java.net.Socket"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates either NIO or regular depending on socketWriteTimeout.]]>
- </doc>
- </method>
- <method name="getDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the DataNode object]]>
- </doc>
- </method>
- <method name="createInterDataNodeProtocolProxy" return="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanodeid" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getNameNodeAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getSelfAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDatanodeRegistration" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return DatanodeRegistration]]>
- </doc>
- </method>
- <method name="getNamenode" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the namenode's identifier]]>
- </doc>
- </method>
- <method name="setNewStorageID"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dnReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shut down this instance of the datanode.
- Returns only after shutdown is complete.
- This method can only be called by the offerService thread.
- Otherwise, deadlock might occur.]]>
- </doc>
- </method>
- <method name="checkDiskError"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="e" type="java.lang.Exception"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
 - <![CDATA[Check if there is no space on disk
- @param e that caused this checkDiskError call]]>
- </doc>
- </method>
- <method name="checkDiskError"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check if there is a disk failure and if so, handle the error]]>
- </doc>
- </method>
- <method name="offerService"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Main loop for the DataNode. Runs until shutdown,
- forever calling remote NameNode functions.]]>
- </doc>
- </method>
- <method name="notifyNamenodeReceivedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="delHint" type="java.lang.String"/>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[No matter what kind of exception we get, keep retrying to offerService().
- That's the loop that connects to the NameNode and provides basic DataNode
- functionality.
- Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
- </doc>
- </method>
- <method name="runDatanodeDaemon"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dn" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start a single datanode daemon and wait for it to finish.
- If this thread is specifically interrupted, it will stop waiting.]]>
- </doc>
- </method>
- <method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Instantiate a single datanode object. This must be run by invoking
- {@link DataNode#runDatanodeDaemon(DataNode)} subsequently.]]>
- </doc>
- </method>
- <method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Instantiate & Start a single datanode daemon and wait for it to finish.
- If this thread is specifically interrupted, it will stop waiting.]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="scheduleBlockReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="delay" type="long"/>
- <doc>
 - <![CDATA[This method arranges for the data node to send the block report at the next heartbeat.]]>
- </doc>
- </method>
- <method name="getFSDataset" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[This method is used for testing.
- Examples are adding and deleting blocks directly.
 - The most common usage will be when the data node's storage is simulated.
-
- @return the fsdataset that stores the blocks]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- </method>
- <method name="recoverBlocks" return="org.apache.hadoop.util.Daemon"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="java.util.Collection"/>
- </method>
- <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="recoveryId" type="long"/>
- <param name="newLength" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update replica with the new generation stamp and length.]]>
- </doc>
- </method>
- <method name="getProtocolVersion" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="protocol" type="java.lang.String"/>
- <param name="clientVersion" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getReplicaVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DN_CLIENTTRACE_FORMAT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namenode" type="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="data" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="dnRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="EMPTY_DEL_HINT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockScanner" type="org.apache.hadoop.hdfs.server.datanode.DataBlockScanner"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockScannerThread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ipcServer" type="org.apache.hadoop.ipc.Server"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PKT_HEADER_LEN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Header size for a packet]]>
- </doc>
- </field>
- <doc>
- <![CDATA[DataNode is a class (and program) that stores a set of
- blocks for a DFS deployment. A single deployment can
- have one or many DataNodes. Each DataNode communicates
- regularly with a single NameNode. It also communicates
- with client code and other DataNodes from time to time.
- DataNodes store a series of named blocks. The DataNode
- allows client code to read these blocks, or to write new
- block data. The DataNode may also, in response to instructions
- from its NameNode, delete blocks or copy blocks to/from other
- DataNodes.
- The DataNode maintains just one critical table:
- block-> stream of bytes (of BLOCK_SIZE or less)
- This info is stored on a local disk. The DataNode
- reports the table's contents to the NameNode upon startup
- and every so often afterwards.
- DataNodes spend their lives in an endless loop of asking
- the NameNode for something to do. A NameNode cannot connect
- to a DataNode directly; a NameNode simply returns values from
- functions invoked by a DataNode.
- DataNodes maintain an open server socket so that client code
- or other DataNodes can read/write data. The host/port for
- this server is reported to the NameNode, which then sends that
- information to clients or other DataNodes that might be interested.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DataNode -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper -->
- <class name="DatanodeJspHelper" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DatanodeJspHelper"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
- <class name="DataStorage" extends="org.apache.hadoop.hdfs.server.common.Storage"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getStorageID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="isConversionNeeded" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="corruptPreUpgradeStorage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="rootDir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Data storage information file.
- <p>
- @see Storage]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DirectoryScanner -->
- <class name="DirectoryScanner" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Periodically scans the data directories for block and block metadata files.
- Reconciles the differences with block information maintained in
- {@link FSDataset}]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DirectoryScanner -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
- <class name="FSDataset" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"/>
- <constructor name="FSDataset" type="org.apache.hadoop.hdfs.server.datanode.DataStorage, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[An FSDataset has a directory where it loads its data files.]]>
- </doc>
- </constructor>
- <method name="getMetaFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="findBlockFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockId" type="long"/>
- <doc>
- <![CDATA[Return the block file for the given ID]]>
- </doc>
- </method>
- <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="metaFileExists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMetaDataLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total space used by dfs datanode]]>
- </doc>
- </method>
- <method name="hasEnoughResource" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return true - if there are still valid volumes on the DataNode.]]>
- </doc>
- </method>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return total capacity, used and unused]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return how many bytes can still be stored in the FSDataset]]>
- </doc>
- </method>
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Find the block's on-disk length]]>
- </doc>
- </method>
- <method name="getBlockFile" return="java.io.File"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get File name for a given block.]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="seekOffset" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blkOffset" type="long"/>
- <param name="ckoff" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns handles to the block file and its metadata file]]>
- </doc>
- </method>
- <method name="unlinkBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="numLinks" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Make a copy of the block if this block is linked to an existing
- snapshot. This ensures that modifying this block does not modify
- data in any existing snapshots.
- @param block Block
- @param numLinks Unlink if the number of links exceed this value
- @throws IOException
- @return - true if the specified block was unlinked or the block
- is not in any snapshot.]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="recoverAppend" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="recoverClose"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="recoverRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="minBytesRcvd" type="long"/>
- <param name="maxBytesRcvd" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createTemporary" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="adjustCrcChannelPosition"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
- <param name="checksumSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sets the offset in the meta file so that the
- last checksum will be overwritten.]]>
- </doc>
- </method>
- <method name="finalizeBlock"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete the block write!]]>
- </doc>
- </method>
- <method name="unfinalizeBlock"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Remove the temporary block file (if any)]]>
- </doc>
- </method>
- <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Generates a block report from the in-memory block map.]]>
- </doc>
- </method>
- <method name="isValidBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Check whether the given block is a valid one.
- valid means finalized]]>
- </doc>
- </method>
- <method name="invalidate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[We're informed that a block is no longer valid. We
- could lazily garbage-collect the block, but why bother?
- just get rid of it.]]>
- </doc>
- </method>
- <method name="getFile" return="java.io.File"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Turn the block identifier into a filename; ignore generation stamp!!!]]>
- </doc>
- </method>
- <method name="checkDataDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
- <doc>
- <![CDATA[check if a data directory is healthy
- if some volumes failed - make sure to remove all the blocks that belong
- to these volumes
- @throws DiskErrorException]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStorageInfo" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="checkAndUpdate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockId" type="long"/>
- <param name="diskFile" type="java.io.File"/>
- <param name="diskMetaFile" type="java.io.File"/>
- <param name="vol" type="org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume"/>
- <doc>
- <![CDATA[Reconcile the difference between blocks on the disk and blocks in
- volumeMap
- Check the given block for inconsistencies. Look at the
- current state of the block and reconcile the differences as follows:
- <ul>
- <li>If the block file is missing, delete the block from volumeMap</li>
- <li>If the block file exists and the block is missing in volumeMap,
 - add the block to volumeMap</li>
- <li>If generation stamp does not match, then update the block with right
- generation stamp</li>
- <li>If the block length in memory does not match the actual block file length
- then mark the block as corrupt and update the block length in memory</li>
- <li>If the file in {@link ReplicaInfo} does not match the file on
- the disk, update {@link ReplicaInfo} with the correct file</li>
- </ul>
- @param blockId Block that differs
- @param diskFile Block file on the disk
- @param diskMetaFile Metadata file from on the disk
- @param vol Volume of the block file]]>
- </doc>
- </method>
- <method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="use {@link #fetchReplicaInfo(long)} instead.">
- <param name="blockId" type="long"/>
- <doc>
- <![CDATA[@deprecated use {@link #fetchReplicaInfo(long)} instead.]]>
- </doc>
- </method>
- <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="recoveryId" type="long"/>
- <param name="newlength" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getReplicaVisibleLength" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="METADATA_EXTENSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="METADATA_VERSION" type="short"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[FSDataset manages a set of data blocks. Each block
- has a unique name and an extent on disk.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
- <interface name="FSDatasetInterface" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean"/>
- <method name="getMetaDataLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the length of the metadata file of the specified block
- @param b - the block for which the metadata length is desired
- @return the length of the metadata file for the specified block.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns metaData of block b as an input stream (and its length)
- @param b - the block
- @return the metadata input stream;
- @throws IOException]]>
- </doc>
- </method>
- <method name="metaFileExists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Does the meta file exist for this block?
- @param b - the block
- @return true if the metafile for the specified block exists
- @throws IOException]]>
- </doc>
- </method>
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the specified block's on-disk length (excluding metadata)
- @param b
- @return the specified block's on-disk length (excluding metadata)
- @throws IOException]]>
- </doc>
- </method>
- <method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.Replica"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockId" type="long"/>
- <doc>
- <![CDATA[Get reference to the replica meta info in the replicasMap.
- To be called from methods that are synchronized on {@link FSDataset}
- @param blockId
- @return replica from the replicas map]]>
- </doc>
- </method>
- <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@return the generation stamp stored with the block.]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream to read the contents of the specified block
- @param b
- @return an input stream to read the contents of the specified block
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="seekOffset" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream at specified offset of the specified block
- @param b
- @param seekOffset
- @return an input stream to read the contents of the specified block,
- starting at the offset
- @throws IOException]]>
- </doc>
- </method>
- <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blkoff" type="long"/>
- <param name="ckoff" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream at specified offset of the specified block
- The block is still in the tmp directory and is not finalized
- @param b
- @param blkoff
- @param ckoff
- @return an input stream to read the contents of the specified block,
- starting at the offset
- @throws IOException]]>
- </doc>
- </method>
- <method name="createTemporary" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates a temporary replica and returns the meta information of the replica
-
- @param b block
- @return the meta info of the replica which is being written to
- @throws IOException if an error occurs]]>
- </doc>
- </method>
- <method name="createRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates a RBW replica and returns the meta info of the replica
-
- @param b block
- @return the meta info of the replica which is being written to
- @throws IOException if an error occurs]]>
- </doc>
- </method>
- <method name="recoverRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="minBytesRcvd" type="long"/>
- <param name="maxBytesRcvd" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Recovers a RBW replica and returns the meta info of the replica
-
- @param b block
- @param newGS the new generation stamp for the replica
- @param minBytesRcvd the minimum number of bytes that the replica could have
- @param maxBytesRcvd the maximum number of bytes that the replica could have
- @return the meta info of the replica which is being written to
- @throws IOException if an error occurs]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Append to a finalized replica and returns the meta info of the replica
-
- @param b block
- @param newGS the new generation stamp for the replica
- @param expectedBlockLen the number of bytes the replica is expected to have
- @return the meta info of the replica which is being written to
- @throws IOException]]>
- </doc>
- </method>
- <method name="recoverAppend" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Recover a failed append to a finalized replica
- and returns the meta info of the replica
-
- @param b block
- @param newGS the new generation stamp for the replica
- @param expectedBlockLen the number of bytes the replica is expected to have
- @return the meta info of the replica which is being written to
- @throws IOException]]>
- </doc>
- </method>
- <method name="recoverClose"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Recover a failed pipeline close
- It bumps the replica's generation stamp and finalize it if RBW replica
-
- @param b block
- @param newGS the new generation stamp for the replica
- @param expectedBlockLen the number of bytes the replica is expected to have
- @throws IOException]]>
- </doc>
- </method>
- <method name="finalizeBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalizes the block previously opened for writing using writeToBlock.
- The block size is what is in the parameter b and it must match the amount
- of data written
- @param b
- @throws IOException]]>
- </doc>
- </method>
- <method name="unfinalizeBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
- The temporary file associated with this block is deleted.
- @param b
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the block report - the full list of blocks stored
- @return - the block report - the full list of blocks stored]]>
- </doc>
- </method>
- <method name="isValidBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Is the block valid?
- @param b
- @return - true if the specified block is valid]]>
- </doc>
- </method>
- <method name="invalidate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Invalidates the specified blocks
- @param invalidBlks - the blocks to be invalidated
- @throws IOException]]>
- </doc>
- </method>
- <method name="checkDataDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
- <doc>
- <![CDATA[Check if all the data directories are healthy
- @throws DiskErrorException]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stringifies the name of the storage]]>
- </doc>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shutdown the FSDataset]]>
- </doc>
- </method>
- <method name="adjustCrcChannelPosition"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
- <param name="checksumSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sets the file pointer of the checksum stream so that the last checksum
- will be overwritten
- @param b block
- @param stream The stream for the data file and checksum file
- @param checksumSize number of bytes each checksum has
- @throws IOException]]>
- </doc>
- </method>
- <method name="hasEnoughResource" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[checks how many valid storage volumes are there in the DataNode
- @return true if more than the minimum number of valid volumes are left in the FSDataSet
- </doc>
- </method>
- <method name="getReplicaVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get visible length of the specified replica.]]>
- </doc>
- </method>
- <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Initialize a replica recovery.
-
- @return actual state of the replica on this data-node or
- null if data-node does not have the replica.]]>
- </doc>
- </method>
- <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="recoveryId" type="long"/>
- <param name="newLength" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update replica's generation stamp and length and finalize it.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This is an interface for the underlying storage that stores blocks for
- a data node.
- Examples are the FSDataset (which stores blocks on dirs) and
- SimulatedFSDataset (which simulates data).]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
- <class name="FSDatasetInterface.BlockInputStreams" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.io.Closeable"/>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class contains the input streams for the data and checksum
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
- <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[This class contains the output streams for the data and checksum
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
- <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This class provides the input stream and length of the metadata
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.Replica -->
- <interface name="Replica" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get block ID]]>
- </doc>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get generation stamp]]>
- </doc>
- </method>
- <method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the replica state
- @return the replica state]]>
- </doc>
- </method>
- <method name="getNumBytes" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of bytes received
- @return the number of bytes that have been received]]>
- </doc>
- </method>
- <method name="getBytesOnDisk" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of bytes that have been written to disk
- @return the number of bytes that have been written to disk]]>
- </doc>
- </method>
- <method name="getVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of bytes that are visible to readers
- @return the number of bytes that are visible to readers]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This represents block replicas which are stored in a DataNode.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.Replica -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo -->
- <class name="ReplicaInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.datanode.Replica"/>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This class is used by datanodes to maintain meta data of its replicas.
- It provides a general interface for meta information of a replica.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
- <class name="ReplicaNotFoundException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ReplicaNotFoundException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="ReplicaNotFoundException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Exception indicating that DataNode does not have a replica
- that matches the target block.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
- <class name="UpgradeObjectDatanode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Runnable"/>
- <constructor name="UpgradeObjectDatanode"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDatanode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="doUpgrade"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Specifies how the upgrade is performed.
- @throws IOException]]>
- </doc>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete upgrade and return a status complete command for broadcasting.
-
- Data-nodes finish upgrade at different times.
- The data-node needs to re-confirm with the name-node that the upgrade
- is complete while other nodes are still upgrading.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Base class for data-node upgrade objects.
- Data-node upgrades are run in separate threads.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.datanode.metrics">
- <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
- <class name="DataNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This is the JMX MBean for reporting the DataNode Activity.
- The MBean is register using the name
- "hadoop:service=DataNode,name=DataNodeActivity-<hostname>-<portNumber>"
-
- Many of the activity metrics are sampled and averaged on an interval
- which can be specified in the metrics config file.
- <p>
- For the metrics that are sampled and averaged, one must specify
- a metrics context that does periodic update calls. Most metrics contexts do.
- The default Null metrics context however does NOT. So if you aren't
- using any other metrics context then you can turn on the viewing and averaging
- of sampled metrics by specifying the following two lines
- in the hadoop-metrics.properties file:
- <pre>
- dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
- dfs.period=10
- </pre>
- <p>
- Note that the metrics are collected regardless of the context used.
- The context with the update thread is used to average the data periodically
- Impl details: We use a dynamic mbean that gets the list of the metrics
- from the metrics registry passed as an argument to the constructor]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
- <class name="DataNodeMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.]]>
- </doc>
- </method>
- <method name="resetAllMinMax"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockChecksumOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various DataNode statistics
- and publishing them through the metrics interfaces.
- This also registers the JMX MBean for RPC.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #blocksRead}.inc()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
- <interface name="FSDatasetMBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the total space (in bytes) used by dfs datanode
- @return the total space used by dfs datanode
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns total capacity (in bytes) of storage (used and unused)
- @return total capacity of storage (used and unused)
- @throws IOException]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the amount of free storage space (in bytes)
- @return The amount of free storage space
- @throws IOException]]>
- </doc>
- </method>
- <method name="getStorageInfo" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the storage id of the underlying storage]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This Interface defines the methods to get the status of the FSDataset of
- a data node.
- It is also used for publishing via JMX (hence we follow the JMX naming
- convention.)
- * Note we have not used the MetricsDynamicMBeanBase to implement this
- because the interface for the FSDatasetMBean is stable and should
- be published as an interface.
-
- <p>
- Data Node runtime statistic info is reported in another MBean
- @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.namenode">
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BackupNode -->
- <class name="BackupNode" extends="org.apache.hadoop.hdfs.server.namenode.NameNode"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getRpcServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setRpcServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getHttpServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="setHttpServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="loadNamesystem"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="stop"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="size" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="endCheckpoint"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="journal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nnReg" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="jAction" type="int"/>
- <param name="length" type="int"/>
- <param name="args" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[BackupNode.
- <p>
- Backup node can play two roles.
- <ol>
- <li>{@link NamenodeRole#CHECKPOINT} node periodically creates checkpoints,
- that is downloads image and edits from the active node, merges them, and
- uploads the new image back to the active.</li>
- <li>{@link NamenodeRole#BACKUP} node keeps its namespace in sync with the
- active node, and periodically creates checkpoints by simply saving the
- namespace image to local disk(s).</li>
- </ol>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BackupNode -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BackupStorage -->
- <class name="BackupStorage" extends="org.apache.hadoop.hdfs.server.namenode.FSImage"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="isConversionNeeded" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BackupStorage -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockManager -->
- <class name="BlockManager" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="processReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <param name="report" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The given node is reporting all its blocks. Use this info to
- update the (machine-->blocklist) and (block-->machinelist) tables.]]>
- </doc>
- </method>
- <field name="DEFAULT_INITIAL_MAP_CAPACITY" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_MAP_LOAD_FACTOR" type="float"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_MAX_CORRUPT_FILES_RETURNED" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Keeps information related to the blocks stored in the Hadoop cluster.
- This class is a helper class for {@link FSNamesystem} and requires several
- methods to be called with lock held on {@link FSNamesystem}.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockManager -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy -->
- <class name="BlockPlacementPolicy" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockPlacementPolicy"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="verifyBlockPlacement" return="int"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcPath" type="java.lang.String"/>
- <param name="lBlk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
- <param name="minRacks" type="int"/>
- <doc>
- <![CDATA[Verify that the block is replicated on at least minRacks different racks
- if there are more than minRacks racks in the system.
-
- @param srcPath the full pathname of the file to be verified
- @param lBlk block with locations
- @param minRacks number of racks the block should be replicated to
- @return the difference between the required and the actual number of racks
- the block is replicated to.]]>
- </doc>
- </method>
- <method name="chooseReplicaToDelete" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcInode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="replicationFactor" type="short"/>
- <param name="existingReplicas" type="java.util.Collection"/>
- <param name="moreExistingReplicas" type="java.util.Collection"/>
- <doc>
- <![CDATA[Decide whether deleting the specified replica of the block still makes
- the block conform to the configured block placement policy.
-
- @param srcInode The inode of the file to which the block-to-be-deleted belongs
- @param block The block to be deleted
- @param replicationFactor The required number of replicas for this block
- @param existingReplicas The replica locations of this block that are present
- on at least two unique racks.
- @param moreExistingReplicas Replica locations of this block that are not
- listed in the previous parameter.
- @return the replica that is the best candidate for deletion]]>
- </doc>
- </method>
- <method name="initialize"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
- <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
- <doc>
- <![CDATA[Used to setup a BlockPlacementPolicy object. This should be defined by
- all implementations of a BlockPlacementPolicy.
-
- @param conf the configuration object
- @param stats retrieve cluster status from here
- @param clusterMap cluster topology]]>
- </doc>
- </method>
- <method name="getInstance" return="org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
- <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
- <doc>
- <![CDATA[Get an instance of the configured Block Placement Policy based on the
- value of the configuration parameter dfs.block.replicator.classname.
-
- @param conf the configuration to be used
- @param stats an object that is used to retrieve the load on the cluster
- @param clusterMap the network topology of the cluster
- @return an instance of BlockPlacementPolicy]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface is used for choosing the desired number of targets
- for placing block replicas.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy.NotEnoughReplicasException -->
- <class name="BlockPlacementPolicy.NotEnoughReplicasException" extends="java.lang.Exception"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy.NotEnoughReplicasException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault -->
- <class name="BlockPlacementPolicyDefault" extends="org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
- <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcPath" type="java.lang.String"/>
- <param name="numOfReplicas" type="int"/>
- <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <param name="chosenNodes" type="java.util.List"/>
- <param name="blocksize" type="long"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcPath" type="java.lang.String"/>
- <param name="numOfReplicas" type="int"/>
- <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <param name="chosenNodes" type="java.util.List"/>
- <param name="excludedNodes" type="java.util.HashMap"/>
- <param name="blocksize" type="long"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcInode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
- <param name="numOfReplicas" type="int"/>
- <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <param name="chosenNodes" type="java.util.List"/>
- <param name="blocksize" type="long"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="verifyBlockPlacement" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcPath" type="java.lang.String"/>
- <param name="lBlk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
- <param name="minRacks" type="int"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="chooseReplicaToDelete" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="replicationFactor" type="short"/>
- <param name="first" type="java.util.Collection"/>
- <param name="second" type="java.util.Collection"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[The class is responsible for choosing the desired number of targets
- for placing block replicas.
- The replica placement strategy is that if the writer is on a datanode,
- the 1st replica is placed on the local machine,
- otherwise a random datanode. The 2nd replica is placed on a datanode
- that is on a different rack. The 3rd replica is placed on a datanode
- which is on a different node of the same rack as the second replica.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
- <class name="CheckpointSignature" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.WritableComparable"/>
- <constructor name="CheckpointSignature"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A unique signature intended to identify checkpoint transactions.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.ContentSummaryServlet -->
- <class name="ContentSummaryServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ContentSummaryServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Servlets for file checksum]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.ContentSummaryServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
- <class name="CorruptReplicasMap" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="CorruptReplicasMap"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="addToCorruptReplicasMap"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="dn" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <doc>
- <![CDATA[Mark the block belonging to datanode as corrupt.
- @param blk Block to be added to CorruptReplicasMap
- @param dn DatanodeDescriptor which holds the corrupt replica]]>
- </doc>
- </method>
- <method name="numCorruptReplicas" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- </method>
- <method name="size" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Stores information about all corrupt blocks in the File System.
- A Block is considered corrupt only if all of its replicas are
- corrupt. While reporting replicas of a Block, we hide any corrupt
- copies. These copies are removed once Block is found to have
- expected number of good replicas.
- Mapping: Block -> TreeSet<DatanodeDescriptor>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
- <class name="DatanodeDescriptor" extends="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DatanodeDescriptor"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Default constructor]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
- @param nodeID id of the data node]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network
- @param hostName it could be different from host specified for DatanodeID]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, long, long, long, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param capacity capacity of the data node
- @param dfsUsed space used by the data node
- @param remaining remaining capacity of the data node
- @param xceiverCount # of data transfers at the data node]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String, long, long, long, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network
- @param capacity capacity of the data node, including space used by non-dfs
- @param dfsUsed the used space by dfs datanode
- @param remaining remaining capacity of the data node
- @param xceiverCount # of data transfers at the data node]]>
- </doc>
- </constructor>
- <method name="numBlocks" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlocksScheduled" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return Approximate number of blocks currently scheduled to be written
- to this datanode.]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="obj" type="java.lang.Object"/>
- </method>
- <field name="isAlive" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="needKeyUpdate" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeDescriptor tracks stats on a given DataNode,
- such as available storage capacity, last update time, etc.,
- and maintains a set of blocks stored on the datanode.
- This data structure is a data structure that is internal
- to the namenode. It is *not* sent over-the-wire to the Client
- or the Datanodes. Neither is it stored persistently in the
- fsImage.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
- <class name="DatanodeDescriptor.BlockTargetPair" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="block" type="org.apache.hadoop.hdfs.protocol.Block"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="targets" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Block and targets pair]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.DelegationTokenServlet -->
- <class name="DelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="req" type="javax.servlet.http.HttpServletRequest"/>
- <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="PATH_SPEC" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Serve delegation tokens over http for use in hftp.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.DelegationTokenServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
- <class name="FileChecksumServlets" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Servlets for file checksum]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
- <class name="FileChecksumServlets.GetServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets.GetServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Get FileChecksum]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
- <class name="FileChecksumServlets.RedirectServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets.RedirectServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Redirect file checksum queries to an appropriate datanode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
- <class name="FileDataServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileDataServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="createUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="parent" type="java.lang.String"/>
- <param name="i" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
- <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
- <param name="nnproxy" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
- <doc>
- <![CDATA[Create a redirection URI]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Service a GET request as described below.
- Request:
- {@code
- GET http://<nn>:<port>/data[/<path>] HTTP/1.1
- }]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
- @see org.apache.hadoop.hdfs.HftpFileSystem]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
- <class name="FsckServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FsckServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Handle fsck request]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class is used in Namesystem's web server to do fsck on namenode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.FSClusterStats -->
- <interface name="FSClusterStats" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getTotalLoad" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[an indication of the total load of the cluster.
-
- @return a count of the total number of block transfers and block
- writes that are currently occurring on the cluster.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface is used for retrieving the load related statistics of
- the cluster.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.FSClusterStats -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
- <class name="FSEditLog" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="logSync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sync all modifications done by this thread.
- The internal concurrency design of this class is as follows:
- - Log items are written synchronized into an in-memory buffer,
- and each assigned a transaction ID.
- - When a thread (client) would like to sync all of its edits, logSync()
- uses a ThreadLocal transaction ID to determine what edit number must
- be synced to.
- - The isSyncRunning volatile boolean tracks whether a sync is currently
- in progress.
- The data is double-buffered within each edit log implementation so that
- in-memory writing can occur in parallel with the on-disk writing.
- Each sync occurs in three steps:
- 1. synchronized, it swaps the double buffer and sets the isSyncRunning
- flag.
- 2. unsynchronized, it flushes the data to storage
- 3. synchronized, it resets the flag and notifies anyone waiting on the
- sync.
- The lack of synchronization on step 2 allows other threads to continue
- to write into the memory buffer while the sync is in progress.
- Because this step is unsynchronized, actions that need to avoid
- concurrency with sync() should be synchronized and also call
- waitForSyncToFinish() before assuming they are running alone.]]>
- </doc>
- </method>
- <method name="logOpenFile"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction"/>
- <doc>
- <![CDATA[Add open lease record to edit log.
- Records the block locations of the last block.]]>
- </doc>
- </method>
- <method name="logCloseFile"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
- <doc>
- <![CDATA[Add close lease record to edit log.]]>
- </doc>
- </method>
- <method name="logMkDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
- <doc>
- <![CDATA[Add create directory record to edit log]]>
- </doc>
- </method>
- <method name="setBufferCapacity"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="size" type="int"/>
- </method>
- <method name="getOutputStreamIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="streamType" type="org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType"/>
- <doc>
- <![CDATA[Get stream iterator for the specified type.]]>
- </doc>
- </method>
- <field name="OP_INVALID" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[FSEditLog maintains a log of the namespace modifications.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSImage -->
- <class name="FSImage" extends="org.apache.hadoop.hdfs.server.common.Storage"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FSImage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="setRestoreFailedStorage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="val" type="boolean"/>
- </method>
- <method name="getRestoreFailedStorage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write last checkpoint time and version file into the storage directory.
-
- The version file should always be written last.
- Missing or corrupted version file indicates that
- the checkpoint is not valid.
-
- @param sd storage directory
- @throws IOException]]>
- </doc>
- </method>
- <method name="getEditLog" return="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isConversionNeeded" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="saveCurrent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save current image and empty journal into {@code current} directory.]]>
- </doc>
- </method>
- <method name="moveCurrent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Move {@code current} to {@code lastcheckpoint.tmp} and
- recreate empty {@code current}.
- {@code current} is moved only if it is well formatted,
- that is contains VERSION file.
-
- @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()
- @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()]]>
- </doc>
- </method>
- <method name="moveLastCheckpoint"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Move {@code lastcheckpoint.tmp} to {@code previous.checkpoint}
-
- @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()
- @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()]]>
- </doc>
- </method>
- <method name="format"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFsEditName" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="corruptPreUpgradeStorage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="rootDir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readBytes" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="checkpointTime" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="editLog" type="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="removedStorageDirs" type="java.util.List"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[list of failed (and thus removed) storages]]>
- </doc>
- </field>
- <field name="ckptState" type="org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates"
- transient="false" volatile="true"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Can fs-image be rolled?]]>
- </doc>
- </field>
- <doc>
- <![CDATA[FSImage handles checkpointing and logging of the namespace edits.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSImage -->
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.FSInodeInfo -->
- <interface name="FSInodeInfo" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getFullPathName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[a string representation of an inode
-
- @return the full pathname (from root) that this inode represents]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface is used by the pluggable block placement policy
- to expose a few characteristics of an Inode.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.FSInodeInfo -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
- <class name="FSNamesystem" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean"/>
- <implements name="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
- <method name="getNamespaceDirs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getStorageDirs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="propertyName" type="java.lang.String"/>
- </method>
- <method name="getNamespaceEditsDirs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getUpgradePermission" return="org.apache.hadoop.fs.permission.PermissionStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the default path permission when upgrading from releases with no
- permissions (<=0.15) to releases with permissions (>=0.16)]]>
- </doc>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Close down this file system manager.
- Causes heartbeat and lease daemons to stop; waits briefly for
- them to finish, but a short timeout returns control back to caller.]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set permissions for an existing file.
- @throws IOException]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="group" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set owner for an existing file.
- @throws IOException]]>
- </doc>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="srcs" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Moves all the blocks from srcs and appends them to trg
- To avoid rollbacks we will verify validity of ALL of the args
- before we start actual move.
- @param target
- @param srcs
- @throws IOException]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[stores the modification and access time for this inode.
- The access time is precise up to an hour. The transaction, if needed, is
- written to the edits log but is not flushed.]]>
- </doc>
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="link" type="java.lang.String"/>
- <param name="dirPerms" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a symbolic link.]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
-
- The NameNode sets new replication and schedules either replication of
- under-replicated data blocks or removal of the excessive block copies
- if the blocks are over-replicated.
-
- @see ClientProtocol#setReplication(String, short)
- @param src file name
- @param replication new replication
- @return true if successful;
- false if file does not exist or is a directory]]>
- </doc>
- </method>
- <method name="getAdditionalBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="excludedNodes" type="java.util.HashMap"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[The client would like to obtain an additional block for the indicated
- filename (which is being written-to). Return an array that consists
- of the block, plus a set of machines. The first on this list should
- be where the client writes data. Subsequent items in the list must
- be provided in the connection to the first datanode.
- Make sure the previous blocks have been reported by datanodes and
- are replicated. Will return an empty 2-elt array if we want the
- client to "try again later".]]>
- </doc>
- </method>
- <method name="abandonBlock" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[The client would like to let go of the given block]]>
- </doc>
- </method>
- <method name="completeFile" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="markBlockAsCorrupt"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Mark the block belonging to datanode as corrupt
- @param blk Block to be marked as corrupt
- @param dn Datanode which holds the corrupt replica]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Remove the indicated filename from namespace. If the filename
- is a directory (non empty) and recursive is set to false then throw exception.]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permissions" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create all the necessary directories]]>
- </doc>
- </method>
- <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get a partial listing of the indicated directory
- @param src the directory name
- @param startAfter the name to start after
- @return a partial listing starting after startAfter]]>
- </doc>
- </method>
- <method name="registerDatanode"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Register Datanode.
- <p>
- The purpose of registration is to identify whether the new datanode
- serves a new data storage, and will report new data block copies,
- which the namenode was not aware of; or the datanode is a replacement
- node for the data storage that was previously served by a different
- or the same (in terms of host:port) datanode.
- The data storages are distinguished by their storageIDs. When a new
- data storage is reported the namenode issues a new unique storageID.
- <p>
- Finally, the namenode returns its namespaceID as the registrationID
- for the datanodes.
- namespaceID is a persistent attribute of the name space.
- The registrationID is checked every time the datanode is communicating
- with the namenode.
- Datanodes with inappropriate registrationID are rejected.
- If the namenode stops, and then restarts it can restore its
- namespaceID and will continue serving the datanodes that have previously
- registered with the namenode without restarting the whole cluster.
-
- @see org.apache.hadoop.hdfs.server.datanode.DataNode#register()]]>
- </doc>
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get registrationID for datanodes based on the namespaceID.
-
- @see #registerDatanode(DatanodeRegistration)
- @see FSImage#newNamespaceID()
- @return registration ID]]>
- </doc>
- </method>
- <method name="computeDatanodeWork" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Compute block replication and block invalidation work
- that can be scheduled on data-nodes.
- The datanode will be informed of this work at the next heartbeat.
-
- @return number of blocks scheduled for replication or removal.]]>
- </doc>
- </method>
- <method name="setNodeReplicationLimit"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="limit" type="int"/>
- </method>
- <method name="removeDatanode"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[remove a datanode descriptor
- @param nodeID datanode ID]]>
- </doc>
- </method>
- <method name="processReport"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="newReport" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The given node is reporting all its blocks. Use this info to
- update the (machine-->blocklist) and (block-->machinelist) tables.]]>
- </doc>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="delHint" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The given node is reporting that it received a certain block.]]>
- </doc>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCapacityTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total raw bytes including non-dfs used space.]]>
- </doc>
- </method>
- <method name="getCapacityUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total used space by data nodes]]>
- </doc>
- </method>
- <method name="getCapacityUsedPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total used space by data nodes as percentage of total capacity]]>
- </doc>
- </method>
- <method name="getCapacityUsedNonDFS" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total used space by data nodes for non DFS purposes such
- as storing temporary files on the local file system]]>
- </doc>
- </method>
- <method name="getCapacityRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total non-used raw bytes.]]>
- </doc>
- </method>
- <method name="getCapacityRemainingPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total remaining space by data nodes as percentage of total capacity]]>
- </doc>
- </method>
- <method name="getTotalLoad" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total number of connections.]]>
- </doc>
- </method>
- <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- </method>
- <method name="DFSNodesStatus"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="live" type="java.util.ArrayList"/>
- <param name="dead" type="java.util.ArrayList"/>
- </method>
- <method name="stopDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Stop decommissioning the specified datanodes.]]>
- </doc>
- </method>
- <method name="getDataNodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getStartTime" return="java.util.Date"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rereads the config to get hosts and exclude list file names.
- Rereads the files to update the hosts and exclude lists. It
- checks if any of the hosts have changed states:
- 1. Added to hosts --> no further work needed here.
- 2. Removed from hosts --> mark AdminState as decommissioned.
- 3. Added to exclude --> start decommission.
- 4. Removed from exclude --> stop decommission.]]>
- </doc>
- </method>
- <method name="getDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get data node by storage ID.
-
- @param nodeID
- @return DatanodeDescriptor or null if the node is not found.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlocksTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the total number of blocks in the system.]]>
- </doc>
- </method>
- <method name="getFilesTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPendingReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUnderReplicatedBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCorruptReplicaBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns number of blocks with corrupt replicas]]>
- </doc>
- </method>
- <method name="getScheduledReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPendingDeletionBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getExcessBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlockCapacity" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFSState" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFSNamesystemMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get FSNamesystemMetrics]]>
- </doc>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[shutdown FSNamesystem]]>
- </doc>
- </method>
- <method name="getNumLiveDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of live data nodes
- @return Number of live data nodes]]>
- </doc>
- </method>
- <method name="getNumDeadDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of dead data nodes
- @return Number of dead data nodes]]>
- </doc>
- </method>
- <method name="setGenerationStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- <doc>
- <![CDATA[Sets the generation stamp for this filesystem]]>
- </doc>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the generation stamp for this filesystem]]>
- </doc>
- </method>
- <method name="numCorruptReplicas" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- </method>
- <method name="getDecommissioningNodes" return="java.util.ArrayList"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDelegationTokenSecretManager" return="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param renewer
- @return Token<DelegationTokenIdentifier>
- @throws IOException]]>
- </doc>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param token
- @return New expiryTime of the token
- @throws InvalidToken
- @throws IOException]]>
- </doc>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param token
- @throws IOException]]>
- </doc>
- </method>
- <method name="logUpdateMasterKey"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Log the updateMasterKey operation to edit logs
-
- @param key new delegation key.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="auditLog" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Logger for audit events, noting successful FSNamesystem operations. Emits
- to FSNamesystem.audit at INFO. Each event causes a set of tab-separated
- <code>key=value</code> pairs to be written for the following properties:
- <code>
- ugi=<ugi in RPC>
- ip=<remote IP>
- cmd=<command>
- src=<src path>
- dst=<dst path (optional)>
- perm=<permissions (optional)>
- </code>]]>
- </doc>
- </field>
- <field name="dir" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="leaseManager" type="org.apache.hadoop.hdfs.server.namenode.LeaseManager"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="lmthread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="replthread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[FSNamesystem does the actual bookkeeping work for the
- DataNode.
- It tracks several important tables.
- 1) valid fsname --> blocklist (kept on disk, logged)
- 2) Set of all valid blocks (inverted #1)
- 3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
- 4) machine --> blocklist (inverted #2)
- 5) LRU cache of updated-heartbeat machines]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
- <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="GetImageServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[This class is used in Namesystem's jetty to retrieve a file.
- Typically used by the Secondary NameNode to retrieve image and
- edit file for periodic checkpointing.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeSymlink -->
- <class name="INodeSymlink" extends="org.apache.hadoop.hdfs.server.namenode.INode"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="isLink" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getLinkValue" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getSymlink" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isDirectory" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[An INode representing a symbolic link.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeSymlink -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
- <class name="LeaseExpiredException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="LeaseExpiredException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The lease that was being used to create this file has expired.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
- <class name="LeaseManager" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getLeaseByPath" return="org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <doc>
- <![CDATA[@return the lease containing src]]>
- </doc>
- </method>
- <method name="countLease" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return the number of leases currently in the system]]>
- </doc>
- </method>
- <method name="setLeasePeriod"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="softLimit" type="long"/>
- <param name="hardLimit" type="long"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[LeaseManager does the lease housekeeping for writing on files.
- This class also provides useful static methods for lease recovery.
-
- Lease Recovery Algorithm
- 1) Namenode retrieves lease information
- 2) For each file f in the lease, consider the last block b of f
- 2.1) Get the datanodes which contains b
- 2.2) Assign one of the datanodes as the primary datanode p
- 2.3) p obtains a new generation stamp from the namenode
- 2.4) p gets the block info from each datanode
- 2.5) p computes the minimum block length
- 2.6) p updates the datanodes, which have a valid generation stamp,
- with the new generation stamp and the minimum block length
- 2.7) p acknowledges the update results to the namenode
- 2.8) Namenode updates the BlockInfo
- 2.9) Namenode removes f from the lease
- and removes the lease once all files have been removed
- 2.10) Namenode commits changes to the edit log]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
- <class name="ListPathsServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ListPathsServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="buildRoot" return="java.util.Map"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
- <doc>
- <![CDATA[Build a map from the query string, setting values and defaults.]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Service a GET request as described below.
- Request:
- {@code
- GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
- }
- Where <i>option</i> (default) in:
- recursive ("no")
- filter (".*")
- exclude ("\..*\.crc")
- Response: A flat list of files/directories in the following format:
- {@code
- <listing path="..." recursive="(yes|no)" filter="..."
- time="yyyy-MM-dd hh:mm:ss UTC" version="...">
- <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
- <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
- blocksize="..."
- replication="..." size="..."/>
- </listing>
- }]]>
- </doc>
- </method>
- <field name="df" type="java.lang.ThreadLocal"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Obtain meta-information about a filesystem.
- @see org.apache.hadoop.hdfs.HftpFileSystem]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode -->
- <class name="NameNode" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols"/>
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start NameNode.
- <p>
- The name-node can be started with one of the following startup options:
- <ul>
- <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
- <li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
- <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
- <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
- <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
- upgrade and create a snapshot of the current file system state</li>
- <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
- cluster back to the previous state</li>
- <li>{@link StartupOption#FINALIZE FINALIZE} - finalize
- previous upgrade</li>
- <li>{@link StartupOption#IMPORT IMPORT} - import checkpoint</li>
- </ul>
- The option is passed via configuration field:
- <tt>dfs.namenode.startup</tt>
-
- The conf will be modified to reflect the actual ports on which
- the NameNode is up and running if the user passes the port as
- <code>zero</code> in the conf.
-
- @param conf configuration
- @throws IOException]]>
- </doc>
- </constructor>
- <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <method name="getProtocolVersion" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="protocol" type="java.lang.String"/>
- <param name="clientVersion" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="format"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Format a new filesystem. Destroys any filesystem that may already
- exist at this location.]]>
- </doc>
- </method>
- <method name="getNameNodeMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="address" type="java.lang.String"/>
- </method>
- <method name="getAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="namenode" type="java.net.InetSocketAddress"/>
- </method>
- <method name="getHostPortString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="addr" type="java.net.InetSocketAddress"/>
- <doc>
- <![CDATA[Compose a "host:port" string from the address.]]>
- </doc>
- </method>
- <method name="getRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRpcServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setRpcServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getHttpServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="setHttpServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="loadNamesystem"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Initialize name-node.
-
- @param conf the configuration]]>
- </doc>
- </method>
- <method name="join"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Wait for service to finish.
- (Normally, it runs forever.)]]>
- </doc>
- </method>
- <method name="stop"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stop all NameNode threads and wait for all to finish.]]>
- </doc>
- </method>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="size" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getAccessKeys" return="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="endCheckpoint"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="journalSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="journal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="jAction" type="int"/>
- <param name="length" type="int"/>
- <param name="args" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="create"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
- <param name="createParent" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="excludedNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="abandonBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client needs to give up on the block.]]>
- </doc>
- </method>
- <method name="complete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client has detected an error on the specified located blocks
- and is reporting them to the server. For now, the namenode will
- mark the block as corrupt. In the future we might
- check the blocks are actually corrupt.]]>
- </doc>
- </method>
- <method name="updateBlockForPipeline" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="updatePipeline"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="commitBlockSynchronization"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newgenerationstamp" type="long"/>
- <param name="newlength" type="long"/>
- <param name="closeFile" type="boolean"/>
- <param name="deleteblock" type="boolean"/>
- <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getPreferredBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="trg" type="java.lang.String"/>
- <param name="src" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="renewLease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file.
- @param src The string representation of the path to the file
- @return object containing information regarding the file
- or null if file not found]]>
- </doc>
- </method>
- <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file. If the path refers to a
- symlink then the FileStatus of the symlink is returned.
- @param src The string representation of the path to the file
- @return object containing information regarding the file
- or null if file not found]]>
- </doc>
- </method>
- <method name="getStats" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@inheritDoc]]>
- </doc>
- </method>
- <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@inheritDoc]]>
- </doc>
- </method>
- <method name="isInSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is the cluster currently in safe mode?]]>
- </doc>
- </method>
- <method name="restoreFailedStorage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="arg" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <doc>
- <![CDATA[@throws AccessControlException
- @inheritDoc]]>
- </doc>
- </method>
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@inheritDoc]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the list of datanodes that the namenode should allow to
- connect. Re-reads conf by creating new HdfsConfiguration object and
- uses the files list in the configuration to update the list.]]>
- </doc>
- </method>
- <method name="getEditLogSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the size of the current edit log.]]>
- </doc>
- </method>
- <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Roll the edit log.]]>
- </doc>
- </method>
- <method name="rollFsImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Roll the image]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps namenode state into specified file]]>
- </doc>
- </method>
- <method name="getCorruptFiles" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="fsync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@inheritDoc]]>
- </doc>
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="link" type="java.lang.String"/>
- <param name="dirPerms" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@inheritDoc]]>
- </doc>
- </method>
- <method name="getLinkTarget" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@inheritDoc]]>
- </doc>
- </method>
- <method name="registerDatanode" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="capacity" type="long"/>
- <param name="dfsUsed" type="long"/>
- <param name="remaining" type="long"/>
- <param name="xmitsInProgress" type="int"/>
- <param name="xceiverCount" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Data node notify the name node that it is alive
- Return an array of block-oriented commands for the datanode to execute.
- This will be either a transfer or a delete operation.]]>
- </doc>
- </method>
- <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="long[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <param name="delHints" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="verifyRequest"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Verify request.
-
- Verifies correctness of the datanode version, registration ID, and
- if the datanode does not need to be shutdown.
-
- @param nodeReg data node registration
- @throws IOException]]>
- </doc>
- </method>
- <method name="verifyVersion"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="version" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Verify version.
-
- @param version
- @throws IOException]]>
- </doc>
- </method>
- <method name="getFsImageName" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the name of the fsImage file]]>
- </doc>
- </method>
- <method name="getFSImage" return="org.apache.hadoop.hdfs.server.namenode.FSImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFsImageNameCheckpoint" return="java.io.File[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the name of the fsImage file uploaded by periodic
- checkpointing]]>
- </doc>
- </method>
- <method name="getNameNodeAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the address on which the NameNodes is listening to.
- @return the address on which the NameNodes is listening to.]]>
- </doc>
- </method>
- <method name="getHttpAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the address of the NameNodes http server,
- which is used to access the name-node web UI.
-
- @return the http address.]]>
- </doc>
- </method>
- <method name="refreshServiceAcl"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="refreshUserToGroupsMappings"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createNameNode" return="org.apache.hadoop.hdfs.server.namenode.NameNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <field name="DEFAULT_PORT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="stateChangeLog" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="role" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="server" type="org.apache.hadoop.ipc.Server"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[RPC server]]>
- </doc>
- </field>
- <field name="rpcAddress" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[RPC server address]]>
- </doc>
- </field>
- <field name="httpServer" type="org.apache.hadoop.http.HttpServer"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[httpServer]]>
- </doc>
- </field>
- <field name="httpAddress" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[HTTP server address]]>
- </doc>
- </field>
- <field name="stopRequested" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[only used for testing purposes]]>
- </doc>
- </field>
- <field name="nodeRegistration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Registration information of this name-node]]>
- </doc>
- </field>
- <doc>
- <![CDATA[NameNode serves as both directory namespace manager and
- "inode table" for the Hadoop DFS. There is a single NameNode
- running in any DFS deployment. (Well, except when there
- is a second backup/failover NameNode.)
- The NameNode controls two critical tables:
- 1) filename->blocksequence (namespace)
- 2) block->machinelist ("inodes")
- The first table is stored on disk and is very precious.
- The second table is rebuilt every time the NameNode comes
- up.
- 'NameNode' refers to both this class as well as the 'NameNode server'.
- The 'FSNamesystem' class actually performs most of the filesystem
- management. The majority of the 'NameNode' class itself is concerned
- with exposing the IPC interface and the http server to the outside world,
- plus some configuration management.
- NameNode implements the ClientProtocol interface, which allows
- clients to ask for DFS services. ClientProtocol is not
- designed for direct use by authors of DFS client code. End-users
- should instead use the org.apache.nutch.hadoop.fs.FileSystem class.
- NameNode also implements the DatanodeProtocol interface, used by
- DataNode programs that actually store DFS data blocks. These
- methods are invoked repeatedly and automatically by all the
- DataNodes in a DFS deployment.
- NameNode also implements the NamenodeProtocol interface, used by
- secondary namenodes or rebalancing processes to get partial namenode's
- state, for example partial blocksMap etc.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
- <class name="NamenodeFsck" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="fsck"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check files on DFS, starting from the indicated path.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CORRUPT_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HEALTHY_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NONEXISTENT_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FAILURE_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FIXING_NONE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Don't attempt any fixing .]]>
- </doc>
- </field>
- <field name="FIXING_MOVE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Move corrupted files to /lost+found .]]>
- </doc>
- </field>
- <field name="FIXING_DELETE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Delete corrupted files.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
- sub-optimal conditions.
- <p>The tool scans all files and directories, starting from an indicated
- root path. The following abnormal conditions are detected and handled:</p>
- <ul>
- <li>files with blocks that are completely missing from all datanodes.<br/>
- In this case the tool can perform one of the following actions:
- <ul>
- <li>none ({@link #FIXING_NONE})</li>
- <li>move corrupted files to /lost+found directory on DFS
- ({@link #FIXING_MOVE}). Remaining data blocks are saved as
- block chains, representing the longest consecutive series of valid blocks.</li>
- <li>delete corrupted files ({@link #FIXING_DELETE})</li>
- </ul>
- </li>
- <li>detect files with under-replicated or over-replicated blocks</li>
- </ul>
- Additionally, the tool collects a detailed overall DFS statistics, and
- optionally can print detailed statistics on block locations and replication
- factors of each file.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
- <class name="NotReplicatedYetException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NotReplicatedYetException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The file has not finished being written to enough datanodes yet.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
- <class name="SafeModeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="SafeModeException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when the name node is in safe mode.
- Clients cannot modify the namespace until safe mode is off.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
- <class name="SecondaryNameNode" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Runnable"/>
- <constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a connection to the primary namenode.]]>
- </doc>
- </constructor>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shut down this instance of the SecondaryNameNode.
- Returns only after shutdown is complete.]]>
- </doc>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[main() has some simple utility methods.
- @param argv Command line parameters.
- @exception Exception if the filesystem does not exist.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[The Secondary NameNode is a helper to the primary NameNode.
- The Secondary is responsible for supporting periodic checkpoints
- of the HDFS metadata. The current design allows only one Secondary
- NameNode per HDFS cluster.
- The Secondary NameNode is a daemon that periodically wakes
- up (determined by the schedule specified in the configuration),
- triggers a periodic checkpoint and then goes back to sleep.
- The Secondary NameNode uses the ClientProtocol to talk to the
- primary NameNode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
- <class name="StreamFile" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="StreamFile"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDFSClient" return="org.apache.hadoop.hdfs.DFSClient"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="InterruptedException" type="java.lang.InterruptedException"/>
- <doc>
- <![CDATA[getting a client for connecting to dfs]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException -->
- <class name="UnsupportedActionException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UnsupportedActionException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when an operation is not supported.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
- <class name="UpgradeObjectNamenode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeObjectNamenode"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="command" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Process an upgrade command.
- RPC has only one very generic command for all upgrade related inter
- component communications.
- The actual command recognition and execution should be handled here.
- The reply is sent back also as an UpgradeCommand.
-
- @param command
- @return the reply command which is analyzed on the client side.]]>
- </doc>
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="forceProceed"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Base class for name-node upgrade objects.
- Data-node upgrades are run in separate threads.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.namenode.metrics">
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
- <interface name="FSNamesystemMBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getFSState" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The state of the file system: Safemode or Operational
- @return the state]]>
- </doc>
- </method>
- <method name="getBlocksTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of allocated blocks in the system
- @return - number of allocated blocks]]>
- </doc>
- </method>
- <method name="getCapacityTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total storage capacity
- @return - total capacity in bytes]]>
- </doc>
- </method>
- <method name="getCapacityRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Free (unused) storage capacity
- @return - free capacity in bytes]]>
- </doc>
- </method>
- <method name="getCapacityUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Used storage capacity
- @return - used capacity in bytes]]>
- </doc>
- </method>
- <method name="getFilesTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total number of files and directories
- @return - num of files and directories]]>
- </doc>
- </method>
- <method name="getPendingReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks pending to be replicated
- @return - num of blocks to be replicated]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks under replicated
- @return - num of blocks under replicated]]>
- </doc>
- </method>
- <method name="getScheduledReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks scheduled for replication
- @return - num of blocks scheduled for replication]]>
- </doc>
- </method>
- <method name="getTotalLoad" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total Load on the FSNamesystem
- @return - total load of FSNamesystem]]>
- </doc>
- </method>
- <method name="getNumLiveDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of Live data nodes
- @return number of live data nodes]]>
- </doc>
- </method>
- <method name="getNumDeadDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of dead data nodes
- @return number of dead data nodes]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface defines the methods to get the status of the FSNamesystem of
- a name node.
- It is also used for publishing via JMX (hence we follow the JMX naming
- convention.)
-
- Note we have not used the MetricsDynamicMBeanBase to implement this
- because the interface for the NameNodeStateMBean is stable and should
- be published as an interface.
-
- <p>
- Name Node runtime activity statistics are reported in another MBean
- @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
- <class name="FSNamesystemMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="FSNamesystemMetrics" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.
- We set the metrics value within this function before pushing it out.
- FSNamesystem updates its own local variables which are
- light weight compared to Metrics counters.
- Some of the metrics are explicitly cast to int. Few metrics collectors
- do not handle long values. It is safe to cast to int for now as all these
- values fit in int value.
- Metrics related to DFS capacity are stored in bytes which do not fit in
- int, so they are rounded to GB]]>
- </doc>
- </method>
- <field name="numExpiredHeartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various FSNamesystem status metrics
- and publishing them through the metrics interfaces.
- The FSNamesystem creates and registers the JMX MBean.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #filesTotal}.set()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean -->
- <class name="NameNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NameNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This is the JMX MBean for reporting the NameNode Activity.
- The MBean is registered using the name
- "hadoop:service=NameNode,name=NameNodeActivity"
-
- Many of the activity metrics are sampled and averaged on an interval
- which can be specified in the metrics config file.
- <p>
- For the metrics that are sampled and averaged, one must specify
- a metrics context that does periodic update calls. Most metrics contexts do.
- The default Null metrics context however does NOT. So if you aren't
- using any other metrics context then you can turn on the viewing and averaging
- of sampled metrics by specifying the following two lines
- in the hadoop-metrics.properties file:
- <pre>
- dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
- dfs.period=10
- </pre>
- <p>
- Note that the metrics are collected regardless of the context used.
- The context with the update thread is used to average the data periodically
- Impl details: We use a dynamic mbean that gets the list of the metrics
- from the metrics registry passed as an argument to the constructor]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
- <class name="NameNodeMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="NameNodeMetrics" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.]]>
- </doc>
- </method>
- <method name="resetAllMinMax"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numCreateFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesAppended" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numDeleteFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesDeleted" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFileInfoOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numAddBlockOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numcreateSymlinkOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numgetLinkTargetOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="transactionsBatchedInSync" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numBlocksCorrupted" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesInGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various NameNode activity statistics
- and publishing them through the metrics interfaces.
- This also registers the JMX MBean for RPC.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #syncs}.inc()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.protocol">
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
- <class name="BlockCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockCommand" type="int, java.util.List"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create BlockCommand for transferring blocks to another datanode
- @param blocktargetlist blocks to be transferred]]>
- </doc>
- </constructor>
- <constructor name="BlockCommand" type="int, org.apache.hadoop.hdfs.protocol.Block[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create BlockCommand for the given action
- @param blocks blocks related to the action]]>
- </doc>
- </constructor>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.Block[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTargets" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A BlockCommand is an instruction to a datanode
- regarding some blocks under its control. It tells
- the DataNode to either invalidate a set of indicated
- blocks, or to copy a set of indicated blocks to
- another DataNode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand -->
- <class name="BlockRecoveryCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockRecoveryCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create empty BlockRecoveryCommand.]]>
- </doc>
- </constructor>
- <constructor name="BlockRecoveryCommand" type="int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create BlockRecoveryCommand with
- the specified capacity for recovering blocks.]]>
- </doc>
- </constructor>
- <method name="getRecoveringBlocks" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the list of recovering blocks.]]>
- </doc>
- </method>
- <method name="add"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <doc>
- <![CDATA[Add recovering block to the command.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[BlockRecoveryCommand is an instruction to a data-node to recover
- the specified blocks.
- The data-node that receives this command treats itself as a primary
- data-node in the recovery process.
- Block recovery is identified by a recoveryId, which is also the new
- generation stamp, which the block will have after the recovery succeeds.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock -->
- <class name="BlockRecoveryCommand.RecoveringBlock" extends="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockRecoveryCommand.RecoveringBlock"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create empty RecoveringBlock.]]>
- </doc>
- </constructor>
- <constructor name="BlockRecoveryCommand.RecoveringBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create RecoveringBlock.]]>
- </doc>
- </constructor>
- <method name="getNewGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the new generation stamp of the block,
- which also plays role of the recovery id.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[This is a block with locations from which it should be recovered
- and the new generation stamp, which the block will have after
- successful recovery.
-
- The new generation stamp of the block, also plays role of the recovery id.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
- <class name="BlocksWithLocations" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="BlocksWithLocations" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor with one parameter]]>
- </doc>
- </constructor>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[getter]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[serialization method]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[deserialization method]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A class to implement an array of BlockLocations
- It provide efficient customized serialization/deserialization methods
- in stead of using the default array (de)serialization provided by RPC]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
- <class name="BlocksWithLocations.BlockWithLocations" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="BlocksWithLocations.BlockWithLocations"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="BlocksWithLocations.BlockWithLocations" type="org.apache.hadoop.hdfs.protocol.Block, java.lang.String[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[constructor]]>
- </doc>
- </constructor>
- <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get the block]]>
- </doc>
- </method>
- <method name="getDatanodes" return="java.lang.String[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get the block's locations]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[deserialization method]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[serialization method]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A class to keep track of a block and its locations]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.CheckpointCommand -->
- <class name="CheckpointCommand" extends="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="CheckpointCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="CheckpointCommand" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature, boolean, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getSignature" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Checkpoint signature is used to ensure
- that nodes are talking about the same checkpoint.]]>
- </doc>
- </method>
- <method name="isImageObsolete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Indicates whether current backup image is obsolete, and therefore
- need to be discarded?
-
- @return true if current image should be discarded.]]>
- </doc>
- </method>
- <method name="needToReturnImage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Indicates whether the new checkpoint image needs to be transfered
- back to the name-node after the checkpoint is done.
-
- @return true if the checkpoint should be returned back.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Checkpoint command.
- <p>
- Returned to the backup node by the name-node as a reply to the
- {@link NamenodeProtocol#startCheckpoint(NamenodeRegistration)}
- request.<br>
- Contains:
- <ul>
- <li>{@link CheckpointSignature} identifying the particular checkpoint</li>
- <li>indicator whether the backup image should be discarded before starting
- the checkpoint</li>
- <li>indicator whether the image should be transfered back to the name-node
- upon completion of the checkpoint.</li>
- </ul>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.CheckpointCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
- <class name="DatanodeCommand" extends="org.apache.hadoop.hdfs.server.protocol.ServerCommand"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DatanodeCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <field name="REGISTER" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Base class for data-node command.
- Issued by the name-node to notify data-nodes what should be done.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
- <interface name="DatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="registerDatanode" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Register Datanode.
- @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
- @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
-
- @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
- new storageID if the datanode did not have one and
- registration ID for further communication.]]>
- </doc>
- </method>
- <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="capacity" type="long"/>
- <param name="dfsUsed" type="long"/>
- <param name="remaining" type="long"/>
- <param name="xmitsInProgress" type="int"/>
- <param name="xceiverCount" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[sendHeartbeat() tells the NameNode that the DataNode is still
- alive and well. Includes some status info, too.
- It also gives the NameNode a chance to return
- an array of "DatanodeCommand" objects.
- A DatanodeCommand tells the DataNode to invalidate local block(s),
- or to copy them to other DataNodes, etc.]]>
- </doc>
- </method>
- <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="long[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[blockReport() tells the NameNode about all the locally-stored blocks.
- The NameNode returns an array of Blocks that have become obsolete
- and should be deleted. This function is meant to upload *all*
- the locally-stored blocks. It's invoked upon startup and then
- infrequently afterwards.
- @param registration
- @param blocks - the block list as an array of longs.
- Each block is represented as 2 longs.
- This is done instead of Block[] to reduce memory used by block reports.
-
- @return - the next command for DN to process.
- @throws IOException]]>
- </doc>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <param name="delHints" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[blockReceived() allows the DataNode to tell the NameNode about
- recently-received block data, with a hint for pereferred replica
- to be deleted when there is any excessive blocks.
- For example, whenever client code
- writes a new Block here, or another DataNode copies a Block to
- this DataNode, it will call blockReceived().]]>
- </doc>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[errorReport() tells the NameNode about something that has gone
- awry. Useful for debugging.]]>
- </doc>
- </method>
- <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This is a very general way to send a command to the name-node during
- distributed upgrade process.
-
- The generosity is because the variety of upgrade commands is unpredictable.
- The reply from the name-node is also received in the form of an upgrade
- command.
-
- @return a reply in the form of an upgrade command]]>
- </doc>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
- }]]>
- </doc>
- </method>
- <method name="commitBlockSynchronization"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newgenerationstamp" type="long"/>
- <param name="newlength" type="long"/>
- <param name="closeFile" type="boolean"/>
- <param name="deleteblock" type="boolean"/>
- <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Commit block synchronization in lease recovery]]>
- </doc>
- </method>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[24: register() renamed registerDatanode()]]>
- </doc>
- </field>
- <field name="NOTIFY" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DISK_ERROR" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="INVALID_BLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FATAL_DISK_ERROR" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_UNKNOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Determines actions that data node should perform
- when receiving a datanode command.]]>
- </doc>
- </field>
- <field name="DNA_TRANSFER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_INVALIDATE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_SHUTDOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_REGISTER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_FINALIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_RECOVERBLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_ACCESSKEYUPDATE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Protocol that a DFS datanode uses to communicate with the NameNode.
- It's used to upload current load information and block reports.
- The only way a NameNode can communicate with a DataNode is by
- returning values from these functions.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
- <class name="DatanodeRegistration" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <implements name="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
- <constructor name="DatanodeRegistration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Default constructor.]]>
- </doc>
- </constructor>
- <constructor name="DatanodeRegistration" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create DatanodeRegistration]]>
- </doc>
- </constructor>
- <method name="setInfoPort"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="infoPort" type="int"/>
- </method>
- <method name="setIpcPort"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="ipcPort" type="int"/>
- </method>
- <method name="setStorageInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
- </method>
- <method name="setName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="to" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="storageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="exportedKeys" type="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeRegistration class contains all information the name-node needs
- to identify and verify a data-node when it contacts the name-node.
- This information is sent by data-node with each communication request.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
- <class name="DisallowedDatanodeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DisallowedDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when a datanode tries to register or communicate
- with the namenode when it does not appear on the list of included nodes,
- or has been specifically excluded.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
- <interface name="InterDatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Initialize a replica recovery.
-
- @return actual state of the replica on this data-node or
- null if data-node does not have the replica.]]>
- </doc>
- </method>
- <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="recoveryId" type="long"/>
- <param name="newLength" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update replica with the new generation stamp and length.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[5: getBlockMetaDataInfo(), updateBlock() removed.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[An inter-datanode protocol for updating generation stamp]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand -->
- <class name="KeyUpdateCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="KeyUpdateCommand" type="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getExportedKeys" return="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.NamenodeCommand -->
- <class name="NamenodeCommand" extends="org.apache.hadoop.hdfs.server.protocol.ServerCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NamenodeCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NamenodeCommand" type="int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Base class for name-node command.
- Issued by the name-node to notify other name-nodes what should be done.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.NamenodeCommand -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
- <interface name="NamenodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="size" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a list of blocks belonging to <code>datanode</code>
- whose total size equals <code>size</code>.
-
- @see org.apache.hadoop.hdfs.server.balancer.Balancer
- @param datanode a data node
- @param size requested size
- @return a list of blocks & their locations
- @throws RemoteException if size is less than or equal to 0 or
- datanode does not exist]]>
- </doc>
- </method>
- <method name="getAccessKeys" return="org.apache.hadoop.hdfs.security.ExportedAccessKeys"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the current access keys
-
- @return ExportedAccessKeys containing current access keys
- @throws IOException]]>
- </doc>
- </method>
- <method name="getEditLogSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the size of the current edit log (in bytes).
- @return The number of bytes in the current edit log.
- @throws IOException
- @deprecated
- See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
- </doc>
- </method>
- <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Closes the current edit log and opens a new one. The
- call fails if the file system is in SafeMode.
- @throws IOException
- @return a unique token to identify this transaction.
- @deprecated
- See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
- </doc>
- </method>
- <method name="rollFsImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rolls the fsImage log. It removes the old fsImage, copies the
- new image to fsImage, removes the old edits and renames edits.new
- to edits. The call fails if any of the four files are missing.
- @throws IOException
- @deprecated
- See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
- </doc>
- </method>
- <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Request name-node version and storage information.
-
- @return {@link NamespaceInfo} identifying versions and storage information
- of the name-node
- @throws IOException]]>
- </doc>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Report to the active name-node an error occurred on a subordinate node.
- Depending on the error code the active node may decide to unregister the
- reporting node.
-
- @param registration requesting node.
- @param errorCode indicates the error
- @param msg free text description of the error
- @throws IOException]]>
- </doc>
- </method>
- <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Register a subordinate name-node like backup node.
- @return {@link NamenodeRegistration} of the node,
- which this node has just registered with.]]>
- </doc>
- </method>
- <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[A request to the active name-node to start a checkpoint.
- The name-node should decide whether to admit it or reject.
- The name-node also decides what should be done with the backup node
- image before and after the checkpoint.
-
- @see CheckpointCommand
- @see NamenodeCommand
- @see #ACT_SHUTDOWN
-
- @param registration the requesting node
- @return {@link CheckpointCommand} if checkpoint is allowed.
- @throws IOException]]>
- </doc>
- </method>
- <method name="endCheckpoint"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[A request to the active name-node to finalize
- previously started checkpoint.
-
- @param registration the requesting node
- @param sig {@code CheckpointSignature} which identifies the checkpoint.
- @throws IOException]]>
- </doc>
- </method>
- <method name="journalSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the size of the active name-node journal (edit log) in bytes.
-
- @param registration the requesting node
- @return The number of bytes in the journal.
- @throws IOException]]>
- </doc>
- </method>
- <method name="journal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="jAction" type="int"/>
- <param name="length" type="int"/>
- <param name="records" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Journal edit records.
- This message is sent by the active name-node to the backup node
- via {@code EditLogBackupOutputStream} in order to synchronize meta-data
- changes with the backup namespace image.
-
- @param registration active node registration
- @param jAction journal action
- @param length length of the byte array
- @param records byte array containing serialized journal records
- @throws IOException]]>
- </doc>
- </method>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Compared to the previous version the following changes have been introduced:
- (Only the latest change is reflected.
- The log of historical changes can be retrieved from the svn).
-
- 4: new method added: getAccessKeys()]]>
- </doc>
- </field>
- <field name="NOTIFY" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FATAL" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="JA_IS_ALIVE" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="JA_JOURNAL" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="JA_JSPOOL_START" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="JA_CHECKPOINT_TIME" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ACT_UNKNOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ACT_SHUTDOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ACT_CHECKPOINT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Protocol that a secondary NameNode uses to communicate with the NameNode.
- It's used to get part of the name node state]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols -->
- <interface name="NamenodeProtocols" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <implements name="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol"/>
- <implements name="org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol"/>
- <implements name="org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol"/>
- <doc>
- <![CDATA[The full set of RPC methods implemented by the Namenode.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration -->
- <class name="NamenodeRegistration" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
- <constructor name="NamenodeRegistration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NamenodeRegistration" type="java.lang.String, java.lang.String, org.apache.hadoop.hdfs.server.common.StorageInfo, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get name-node role.]]>
- </doc>
- </method>
- <method name="isRole" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="that" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"/>
- </method>
- <method name="getCheckpointTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the age of the image.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Information sent by a subordinate name-node to the active name-node
- during the registration process.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
- <class name="NamespaceInfo" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NamespaceInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NamespaceInfo" type="int, long, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBuildVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDistributedUpgradeVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[NamespaceInfo is returned by the name-node in reply
- to a data-node handshake.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.NodeRegistration -->
- <interface name="NodeRegistration" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get address of the server node.
- @return hostname:portNumber]]>
- </doc>
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get registration ID of the server node.]]>
- </doc>
- </method>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get layout version of the server node.]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Generic class specifying information, which needs to be sent to the name-node
- during the registration process.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.NodeRegistration -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo -->
- <class name="ReplicaRecoveryInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ReplicaRecoveryInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="ReplicaRecoveryInfo" type="long, long, long, org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getOriginalReplicaState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Replica recovery information.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.ServerCommand -->
- <class name="ServerCommand" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="ServerCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Unknown server command constructor.
- Creates a command with action 0.
-
- @see NamenodeProtocol#ACT_UNKNOWN
- @see DatanodeProtocol#DNA_UNKNOWN]]>
- </doc>
- </constructor>
- <constructor name="ServerCommand" type="int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a command for the specified action.
- Actions are protocol specific.
-
- @see DatanodeProtocol
- @see NamenodeProtocol
- @param action]]>
- </doc>
- </constructor>
- <method name="getAction" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get server command action.
- @return action code.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Base class for a server command.
- Issued by the name-node to notify other servers what should be done.
- Commands are defined by actions defined in respective protocols.
-
- @see DatanodeProtocol
- @see NamenodeProtocol]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.ServerCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
- <class name="UpgradeCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UpgradeCommand" type="int, int, short"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCurrentStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="UC_ACTION_REPORT_STATUS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UC_ACTION_START_UPGRADE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This is a generic distributed upgrade command.
-
- During the upgrade cluster components send upgrade commands to each other
- in order to obtain or share information with them.
- It is supposed that each upgrade defines specific upgrade command by
- deriving them from this class.
- The upgrade command contains version of the upgrade, which is verified
- on the receiving side and current status of the upgrade.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
- </package>
- <package name="org.apache.hadoop.hdfs.tools">
- <!-- start class org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -->
- <class name="DelegationTokenFetcher" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenFetcher" type="org.apache.hadoop.hdfs.DistributedFileSystem, java.io.DataOutputStream, org.apache.hadoop.security.UserGroupInformation"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Command-line interface]]>
- </doc>
- </method>
- <method name="go"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Fetch a DelegationToken from the current Namenode and store it in the
- specified file.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -->
- <!-- start class org.apache.hadoop.hdfs.tools.DFSAdmin -->
- <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSAdmin"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct a DFSAdmin object.]]>
- </doc>
- </constructor>
- <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct a DFSAdmin object.]]>
- </doc>
- </constructor>
- <method name="report"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Gives a report on how the FileSystem is doing.
- @exception IOException if the filesystem does not exist.]]>
- </doc>
- </method>
- <method name="setSafeMode"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Safe mode maintenance command.
- Usage: java DFSAdmin -safemode [enter | leave | get]
- @param argv List of command line parameters.
- @param idx The index of the command that is being processed.
- @exception IOException if the filesystem does not exist.]]>
- </doc>
- </method>
- <method name="saveNamespace" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to save the namespace.
- Usage: java DFSAdmin -saveNamespace
- @exception IOException
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
- </doc>
- </method>
- <method name="restoreFaileStorage" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="arg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to enable/disable/check restoring of failed storage replicas in the namenode.
- Usage: java DFSAdmin -restoreFailedStorage true|false|check
- @exception IOException
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)]]>
- </doc>
- </method>
- <method name="refreshNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
- file.
- Usage: java DFSAdmin -refreshNodes
- @exception IOException]]>
- </doc>
- </method>
- <method name="finalizeUpgrade" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to finalize previously performed upgrade.
- Usage: java DFSAdmin -finalizeUpgrade
- @exception IOException]]>
- </doc>
- </method>
- <method name="upgradeProgress" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to request current distributed upgrade status,
- a detailed status, or to force the upgrade to proceed.
-
- Usage: java DFSAdmin -upgradeProgress [status | details | force]
- @exception IOException]]>
- </doc>
- </method>
- <method name="metaSave" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps DFS data structures into specified file.
- Usage: java DFSAdmin -metasave filename
- @param argv List of command line parameters.
- @param idx The index of the command that is being processed.
- @exception IOException if an error occurred while accessing
- the file or path.]]>
- </doc>
- </method>
- <method name="printTopology" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Display each rack and the nodes assigned to that rack, as determined
- by the NameNode, in a hierarchical manner. The nodes and racks are
- sorted alphabetically.
-
- @throws IOException if an error occurs while getting the datanode report]]>
- </doc>
- </method>
- <method name="refreshServiceAcl" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the authorization policy on the {@link NameNode}.
- @return exitcode 0 on success, non-zero on failure
- @throws IOException]]>
- </doc>
- </method>
- <method name="refreshUserToGroupsMappings" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the user-to-groups mappings on the {@link NameNode}.
- @return exitcode 0 on success, non-zero on failure
- @throws IOException]]>
- </doc>
- </method>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[@param argv The parameters passed to this program.
- @exception Exception if the filesystem does not exist.
- @return 0 on success, non zero on error.]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[main() has some simple utility methods.
- @param argv Command line parameters.
- @exception Exception if the filesystem does not exist.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class provides some DFS administrative access.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.DFSAdmin -->
- <!-- start class org.apache.hadoop.hdfs.tools.DFSck -->
- <class name="DFSck" extends="org.apache.hadoop.conf.Configured"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.util.Tool"/>
- <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Filesystem checker.
- @param conf current Configuration]]>
- </doc>
- </constructor>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param args]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <doc>
- <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
- sub-optimal conditions.
- <p>The tool scans all files and directories, starting from an indicated
- root path. The following abnormal conditions are detected and handled:</p>
- <ul>
- <li>files with blocks that are completely missing from all datanodes.<br/>
- In this case the tool can perform one of the following actions:
- <ul>
- <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
- <li>move corrupted files to /lost+found directory on DFS
- ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as
- block chains, representing the longest consecutive series of valid blocks.</li>
- <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
- </ul>
- </li>
- <li>detect files with under-replicated or over-replicated blocks</li>
- </ul>
- Additionally, the tool collects detailed overall DFS statistics, and
- optionally can print detailed statistics on block locations and replication
- factors of each file.
- The tool also provides an option to filter open files during the scan.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.DFSck -->
- <!-- start class org.apache.hadoop.hdfs.tools.HDFSConcat -->
- <class name="HDFSConcat" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HDFSConcat"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param args]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.HDFSConcat -->
- <!-- start class org.apache.hadoop.hdfs.tools.JMXGet -->
- <class name="JMXGet" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="JMXGet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="setService"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="service" type="java.lang.String"/>
- </method>
- <method name="setPort"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="port" type="java.lang.String"/>
- </method>
- <method name="setServer"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="server" type="java.lang.String"/>
- </method>
- <method name="setLocalVMUrl"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="url" type="java.lang.String"/>
- </method>
- <method name="printAllValues"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[print all attributes' values]]>
- </doc>
- </method>
- <method name="getValue" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="java.lang.String"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[get single value by key]]>
- </doc>
- </method>
- <method name="init"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[initializes MBeanServer
- @throws Exception]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <doc>
- <![CDATA[main
-
- @param args]]>
- </doc>
- </method>
- <doc>
- <![CDATA[tool to get data from NameNode or DataNode using MBeans currently the
- following MBeans are available (under hadoop domain):
- hadoop:service=NameNode,name=FSNamesystemState (static)
- hadoop:service=NameNode,name=NameNodeActivity (dynamic)
- hadoop:service=NameNode,name=RpcActivityForPort9000 (dynamic)
- hadoop:service=DataNode,name=RpcActivityForPort50020 (dynamic)
- hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId663800459
- (static)
- hadoop:service=DataNode,name=DataNodeActivity-UndefinedStorageId-520845215
- (dynamic)
-
-
- implementation note: all logging is sent to System.err (since it is a command
- line tool)]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.JMXGet -->
- </package>
- <package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
- <!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -->
- <class name="OfflineImageViewer" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="OfflineImageViewer" type="java.lang.String, org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="go"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Process image file.]]>
- </doc>
- </method>
- <method name="buildOptions" return="org.apache.commons.cli.Options"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Build command-line options and descriptions]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Entry point to command-line-driven operation. User may specify
- options and start fsimage viewer from the command line. Program
- will process image file and exit cleanly or, if an error is
- encountered, inform user and exit.
- @param args Command line options
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[OfflineImageViewer to dump the contents of a Hadoop image file to XML
- or the console. Main entry point into utility, either via the
- command line or programmatically.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -->
- </package>
- </api>
|