hdfs.c 68 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424
  1. /**
  2. * Licensed to the Apache Software Foundation (ASF) under one
  3. * or more contributor license agreements. See the NOTICE file
  4. * distributed with this work for additional information
  5. * regarding copyright ownership. The ASF licenses this file
  6. * to you under the Apache License, Version 2.0 (the
  7. * "License"); you may not use this file except in compliance
  8. * with the License. You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. #include "hdfs.h"
  19. #include "hdfsJniHelper.h"
  20. /* Some frequently used Java paths */
  21. #define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
  22. #define HADOOP_PATH "org/apache/hadoop/fs/Path"
  23. #define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
  24. #define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
  25. #define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
  26. #define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
  27. #define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
  28. #define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
  29. #define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
  30. #define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
  31. #define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
  32. #define JAVA_NET_ISA "java/net/InetSocketAddress"
  33. #define JAVA_NET_URI "java/net/URI"
  34. #define JAVA_STRING "java/lang/String"
  35. #define JAVA_VOID "V"
  36. /* Macros for constructing method signatures */
  37. #define JPARAM(X) "L" X ";"
  38. #define JARRPARAM(X) "[L" X ";"
  39. #define JMETHOD1(X, R) "(" X ")" R
  40. #define JMETHOD2(X, Y, R) "(" X Y ")" R
  41. #define JMETHOD3(X, Y, Z, R) "(" X Y Z")" R
  42. #define KERBEROS_TICKET_CACHE_PATH "hadoop.security.kerberos.ticket.cache.path"
/**
 * hdfsJniEnv: A wrapper struct to be used as 'value'
 * while saving thread -> JNIEnv* mappings
 */
typedef struct
{
    JNIEnv* env;   // The JNI environment attached to one particular thread
} hdfsJniEnv;
  51. /**
  52. * Helper function to destroy a local reference of java.lang.Object
  53. * @param env: The JNIEnv pointer.
  54. * @param jFile: The local reference of java.lang.Object object
  55. * @return None.
  56. */
  57. static void destroyLocalReference(JNIEnv *env, jobject jObject)
  58. {
  59. if (jObject)
  60. (*env)->DeleteLocalRef(env, jObject);
  61. }
  62. /**
  63. * Helper function to create a org.apache.hadoop.fs.Path object.
  64. * @param env: The JNIEnv pointer.
  65. * @param path: The file-path for which to construct org.apache.hadoop.fs.Path
  66. * object.
  67. * @return Returns a jobject on success and NULL on error.
  68. */
  69. static jobject constructNewObjectOfPath(JNIEnv *env, const char *path)
  70. {
  71. //Construct a java.lang.String object
  72. jstring jPathString = (*env)->NewStringUTF(env, path);
  73. //Construct the org.apache.hadoop.fs.Path object
  74. jobject jPath =
  75. constructNewObjectOfClass(env, NULL, "org/apache/hadoop/fs/Path",
  76. "(Ljava/lang/String;)V", jPathString);
  77. if (jPath == NULL) {
  78. fprintf(stderr, "Can't construct instance of class "
  79. "org.apache.hadoop.fs.Path for %s\n", path);
  80. errno = EINTERNAL;
  81. return NULL;
  82. }
  83. // Destroy the local reference to the java.lang.String object
  84. destroyLocalReference(env, jPathString);
  85. return jPath;
  86. }
  87. /**
  88. * Helper function to translate an exception into a meaningful errno value.
  89. * @param exc: The exception.
  90. * @param env: The JNIEnv Pointer.
  91. * @param method: The name of the method that threw the exception. This
  92. * may be format string to be used in conjuction with additional arguments.
  93. * @return Returns a meaningful errno value if possible, or EINTERNAL if not.
  94. */
  95. static int errnoFromException(jthrowable exc, JNIEnv *env,
  96. const char *method, ...)
  97. {
  98. va_list ap;
  99. int errnum = 0;
  100. char *excClass = NULL;
  101. if (exc == NULL)
  102. goto default_error;
  103. if ((excClass = classNameOfObject((jobject) exc, env)) == NULL) {
  104. errnum = EINTERNAL;
  105. goto done;
  106. }
  107. if (!strcmp(excClass, "java.lang.UnsupportedOperationException")) {
  108. errnum = ENOTSUP;
  109. goto done;
  110. }
  111. if (!strcmp(excClass, "org.apache.hadoop.security."
  112. "AccessControlException")) {
  113. errnum = EACCES;
  114. goto done;
  115. }
  116. if (!strcmp(excClass, "org.apache.hadoop.hdfs.protocol."
  117. "QuotaExceededException")) {
  118. errnum = EDQUOT;
  119. goto done;
  120. }
  121. if (!strcmp(excClass, "java.io.FileNotFoundException")) {
  122. errnum = ENOENT;
  123. goto done;
  124. }
  125. //TODO: interpret more exceptions; maybe examine exc.getMessage()
  126. default_error:
  127. //Can't tell what went wrong, so just punt
  128. (*env)->ExceptionDescribe(env);
  129. fprintf(stderr, "Call to ");
  130. va_start(ap, method);
  131. vfprintf(stderr, method, ap);
  132. va_end(ap);
  133. fprintf(stderr, " failed!\n");
  134. errnum = EINTERNAL;
  135. done:
  136. (*env)->ExceptionClear(env);
  137. if (excClass != NULL)
  138. free(excClass);
  139. return errnum;
  140. }
/**
 * Set a configuration value.
 *
 * @param env The JNI environment
 * @param jConfiguration The configuration object to modify
 * @param key The key to modify
 * @param value The value to set the key to
 *
 * @return 0 on success; error code otherwise (errno is also set on failure)
 */
static int hadoopConfSet(JNIEnv *env, jobject jConfiguration,
                         const char *key, const char *value)
{
    int ret;
    jthrowable jExc = NULL;
    jstring jkey = NULL, jvalue = NULL;
    char buf[1024];   // scratch space for the error-context message

    // Convert both C strings to java.lang.String
    jkey = (*env)->NewStringUTF(env, key);
    if (!jkey) {
        ret = ENOMEM;
        goto done;
    }
    jvalue = (*env)->NewStringUTF(env, value);
    if (!jvalue) {
        ret = ENOMEM;
        goto done;
    }
    // Call Configuration#set(String, String)
    ret = invokeMethod(env, NULL, &jExc, INSTANCE, jConfiguration,
                       HADOOP_CONF, "set", JMETHOD2(JPARAM(JAVA_STRING),
                       JPARAM(JAVA_STRING), JAVA_VOID),
                       jkey, jvalue);
    if (ret) {
        // Describe which key/value pair failed when translating the exception
        snprintf(buf, sizeof(buf), "hadoopConfSet(%s, %s)", key, value);
        ret = errnoFromException(jExc, env, buf);
        goto done;
    }
done:
    // Release local references on every path
    if (jkey)
        destroyLocalReference(env, jkey);
    if (jvalue)
        destroyLocalReference(env, jvalue);
    if (ret)
        errno = ret;
    return ret;
}
  186. /**
  187. * Convert a Java string into a C string.
  188. *
  189. * @param env The JNI environment
  190. * @param jStr The Java string to convert
  191. * @param cstr (out param) the C string.
  192. * This will be set to a dynamically allocated
  193. * UTF-8 C string on success.
  194. *
  195. * @return 0 on success; error code otherwise
  196. */
  197. static int jStrToCstr(JNIEnv *env, jstring jstr, char **cstr)
  198. {
  199. char *tmp;
  200. tmp = (*env)->GetStringUTFChars(env, jstr, NULL);
  201. *cstr = strdup(tmp);
  202. (*env)->ReleaseStringUTFChars(env, jstr, tmp);
  203. return 0;
  204. }
  205. static int hadoopConfGet(JNIEnv *env, jobject jConfiguration,
  206. const char *key, char **val)
  207. {
  208. int ret;
  209. jthrowable jExc = NULL;
  210. jvalue jVal;
  211. jstring jkey = NULL;
  212. char buf[1024];
  213. jkey = (*env)->NewStringUTF(env, key);
  214. if (!jkey) {
  215. ret = ENOMEM;
  216. goto done;
  217. }
  218. ret = invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
  219. HADOOP_CONF, "get", JMETHOD1(JPARAM(JAVA_STRING),
  220. JPARAM(JAVA_STRING)), jkey);
  221. if (ret) {
  222. snprintf(buf, sizeof(buf), "hadoopConfGet(%s)", key);
  223. ret = errnoFromException(jExc, env, buf);
  224. goto done;
  225. }
  226. if (!jVal.l) {
  227. *val = NULL;
  228. goto done;
  229. }
  230. ret = jStrToCstr(env, jVal.l, val);
  231. if (ret)
  232. goto done;
  233. done:
  234. if (jkey)
  235. destroyLocalReference(env, jkey);
  236. if (ret)
  237. errno = ret;
  238. return ret;
  239. }
  240. int hdfsConfGet(const char *key, char **val)
  241. {
  242. JNIEnv *env;
  243. int ret;
  244. jobject jConfiguration = NULL;
  245. env = getJNIEnv();
  246. if (env == NULL) {
  247. ret = EINTERNAL;
  248. goto done;
  249. }
  250. jConfiguration = constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  251. if (jConfiguration == NULL) {
  252. fprintf(stderr, "Can't construct instance of class "
  253. "org.apache.hadoop.conf.Configuration\n");
  254. ret = EINTERNAL;
  255. goto done;
  256. }
  257. ret = hadoopConfGet(env, jConfiguration, key, val);
  258. if (ret)
  259. goto done;
  260. ret = 0;
  261. done:
  262. if (jConfiguration)
  263. destroyLocalReference(env, jConfiguration);
  264. if (ret)
  265. errno = ret;
  266. return ret;
  267. }
/**
 * Free a configuration value previously returned by hdfsConfGet.
 *
 * @param val The value to free; may be NULL.
 */
void hdfsConfFree(char *val)
{
    free(val);
}
/**
 * Connection options accumulated by the hdfsBuilder* setters and
 * consumed (and freed) by hdfsBuilderConnect.
 */
struct hdfsBuilder {
    int forceNewInstance;            // nonzero: call FileSystem.newInstance
                                     // rather than FileSystem.get
    const char *nn;                  // namenode host, "default", or NULL for
                                     // the local filesystem (not copied)
    tPort port;                      // namenode port
    const char *kerbTicketCachePath; // Kerberos ticket cache path, or NULL
                                     // (not copied)
    const char *userName;            // user to connect as, or NULL for the
                                     // default user (not copied)
};
  279. struct hdfsBuilder *hdfsNewBuilder(void)
  280. {
  281. struct hdfsBuilder *bld = calloc(1, sizeof(struct hdfsBuilder));
  282. if (!bld) {
  283. errno = ENOMEM;
  284. return NULL;
  285. }
  286. return bld;
  287. }
/**
 * Free an HDFS connection builder without connecting.
 * Not needed after hdfsBuilderConnect, which frees the builder itself.
 *
 * @param bld The builder to free; may be NULL.
 */
void hdfsFreeBuilder(struct hdfsBuilder *bld)
{
    free(bld);
}
/**
 * Make hdfsBuilderConnect create a fresh FileSystem object
 * (FileSystem.newInstance) instead of calling FileSystem.get.
 *
 * @param bld The builder to modify.
 */
void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld)
{
    bld->forceNewInstance = 1;
}
/**
 * Set the namenode to connect to.
 *
 * @param bld The builder to modify.
 * @param nn  Namenode host, "default" for the configured default URI, or
 *            NULL for the local filesystem. The string is NOT copied; it
 *            must stay valid until hdfsBuilderConnect is called.
 */
void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn)
{
    bld->nn = nn;
}
/**
 * Set the namenode port to connect to.
 *
 * @param bld  The builder to modify.
 * @param port The namenode port.
 */
void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port)
{
    bld->port = port;
}
/**
 * Set the user name to connect as.
 *
 * @param bld      The builder to modify.
 * @param userName The user name, or NULL for the default. The string is NOT
 *                 copied; it must stay valid until hdfsBuilderConnect is
 *                 called.
 */
void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName)
{
    bld->userName = userName;
}
/**
 * Set the Kerberos ticket cache path to use when connecting
 * (stored in hadoop.security.kerberos.ticket.cache.path).
 *
 * @param bld                 The builder to modify.
 * @param kerbTicketCachePath The ticket cache path, or NULL for none. The
 *                            string is NOT copied; it must stay valid until
 *                            hdfsBuilderConnect is called.
 */
void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
                                       const char *kerbTicketCachePath)
{
    bld->kerbTicketCachePath = kerbTicketCachePath;
}
/**
 * Connect to an HDFS filesystem (thin wrapper over the builder API).
 *
 * @param host Namenode host, "default", or NULL for the local filesystem.
 * @param port Namenode port.
 * @return A filesystem handle on success; NULL on error (errno set).
 */
hdfsFS hdfsConnect(const char* host, tPort port)
{
    struct hdfsBuilder *bld = hdfsNewBuilder();
    if (!bld)
        return NULL;
    hdfsBuilderSetNameNode(bld, host);
    hdfsBuilderSetNameNodePort(bld, port);
    // hdfsBuilderConnect frees bld on all paths
    return hdfsBuilderConnect(bld);
}
/** Always return a new FileSystem handle */
/**
 * Like hdfsConnect, but forces a new FileSystem object
 * (FileSystem.newInstance rather than FileSystem.get).
 *
 * @param host Namenode host, "default", or NULL for the local filesystem.
 * @param port Namenode port.
 * @return A filesystem handle on success; NULL on error (errno set).
 */
hdfsFS hdfsConnectNewInstance(const char* host, tPort port)
{
    struct hdfsBuilder *bld = hdfsNewBuilder();
    if (!bld)
        return NULL;
    hdfsBuilderSetNameNode(bld, host);
    hdfsBuilderSetNameNodePort(bld, port);
    hdfsBuilderSetForceNewInstance(bld);
    // hdfsBuilderConnect frees bld on all paths
    return hdfsBuilderConnect(bld);
}
/**
 * Connect to an HDFS filesystem as a specific user.
 *
 * @param host Namenode host, "default", or NULL for the local filesystem.
 * @param port Namenode port.
 * @param user The user name to connect as.
 * @return A filesystem handle on success; NULL on error (errno set).
 */
hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user)
{
    struct hdfsBuilder *bld = hdfsNewBuilder();
    if (!bld)
        return NULL;
    hdfsBuilderSetNameNode(bld, host);
    hdfsBuilderSetNameNodePort(bld, port);
    hdfsBuilderSetUserName(bld, user);
    // hdfsBuilderConnect frees bld on all paths
    return hdfsBuilderConnect(bld);
}
/** Always return a new FileSystem handle */
/**
 * Like hdfsConnectAsUser, but forces a new FileSystem object
 * (FileSystem.newInstance rather than FileSystem.get).
 *
 * @param host Namenode host, "default", or NULL for the local filesystem.
 * @param port Namenode port.
 * @param user The user name to connect as.
 * @return A filesystem handle on success; NULL on error (errno set).
 */
hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port,
                                    const char *user)
{
    struct hdfsBuilder *bld = hdfsNewBuilder();
    if (!bld)
        return NULL;
    hdfsBuilderSetNameNode(bld, host);
    hdfsBuilderSetNameNodePort(bld, port);
    hdfsBuilderSetForceNewInstance(bld);
    hdfsBuilderSetUserName(bld, user);
    // hdfsBuilderConnect frees bld on all paths
    return hdfsBuilderConnect(bld);
}
  356. hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
  357. {
  358. JNIEnv *env = 0;
  359. jobject gFsRef = NULL;
  360. jobject jConfiguration = NULL, jFS = NULL, jURI = NULL, jCachePath = NULL;
  361. jstring jURIString = NULL, jUserString = NULL;
  362. jvalue jVal;
  363. jthrowable jExc = NULL;
  364. size_t cURILen;
  365. char *cURI = 0;
  366. int ret = 0;
  367. //Get the JNIEnv* corresponding to current thread
  368. env = getJNIEnv();
  369. if (env == NULL) {
  370. ret = EINTERNAL;
  371. goto done;
  372. }
  373. // jConfiguration = new Configuration();
  374. jConfiguration = constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  375. if (jConfiguration == NULL) {
  376. fprintf(stderr, "Can't construct instance of class "
  377. "org.apache.hadoop.conf.Configuration\n");
  378. goto done;
  379. }
  380. //Check what type of FileSystem the caller wants...
  381. if (bld->nn == NULL) {
  382. // Get a local filesystem.
  383. if (bld->forceNewInstance) {
  384. // fs = FileSytem::newInstanceLocal(conf);
  385. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS,
  386. "newInstanceLocal", JMETHOD1(JPARAM(HADOOP_CONF),
  387. JPARAM(HADOOP_LOCALFS)), jConfiguration)) {
  388. ret = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  389. "FileSystem::newInstanceLocal");
  390. goto done;
  391. }
  392. jFS = jVal.l;
  393. } else {
  394. // fs = FileSytem::getLocal(conf);
  395. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "getLocal",
  396. JMETHOD1(JPARAM(HADOOP_CONF),
  397. JPARAM(HADOOP_LOCALFS)),
  398. jConfiguration) != 0) {
  399. ret = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  400. "FileSystem::getLocal");
  401. goto done;
  402. }
  403. jFS = jVal.l;
  404. }
  405. } else {
  406. if (!strcmp(bld->nn, "default")) {
  407. // jURI = FileSystem.getDefaultUri(conf)
  408. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS,
  409. "getDefaultUri",
  410. "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
  411. jConfiguration) != 0) {
  412. ret = errnoFromException(jExc, env, "org.apache.hadoop.fs.",
  413. "FileSystem::getDefaultUri");
  414. goto done;
  415. }
  416. jURI = jVal.l;
  417. } else {
  418. // fs = FileSystem::get(URI, conf, ugi);
  419. cURILen = strlen(bld->nn) + 16;
  420. cURI = malloc(cURILen);
  421. if (!cURI) {
  422. fprintf(stderr, "failed to allocate memory for HDFS URI\n");
  423. ret = ENOMEM;
  424. goto done;
  425. }
  426. snprintf(cURI, cURILen, "hdfs://%s:%d", bld->nn, (int)(bld->port));
  427. jURIString = (*env)->NewStringUTF(env, cURI);
  428. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
  429. "create", "(Ljava/lang/String;)Ljava/net/URI;",
  430. jURIString) != 0) {
  431. ret = errnoFromException(jExc, env, "java.net.URI::create");
  432. goto done;
  433. }
  434. jURI = jVal.l;
  435. }
  436. if (bld->kerbTicketCachePath) {
  437. ret = hadoopConfSet(env, jConfiguration,
  438. KERBEROS_TICKET_CACHE_PATH, bld->kerbTicketCachePath);
  439. if (ret)
  440. goto done;
  441. }
  442. if (bld->userName) {
  443. jUserString = (*env)->NewStringUTF(env, bld->userName);
  444. }
  445. if (bld->forceNewInstance) {
  446. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
  447. HADOOP_FS, "newInstance",
  448. JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
  449. JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
  450. jConfiguration, jUserString)) {
  451. ret = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  452. "Filesystem::newInstance(URI, Configuration)");
  453. goto done;
  454. }
  455. jFS = jVal.l;
  456. } else {
  457. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
  458. JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
  459. JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)),
  460. jURI, jConfiguration, jUserString)) {
  461. ret = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  462. "Filesystem::get(URI, Configuration, String)");
  463. goto done;
  464. }
  465. jFS = jVal.l;
  466. }
  467. }
  468. done:
  469. if (jFS) {
  470. /* Create a global reference for this fs */
  471. gFsRef = (*env)->NewGlobalRef(env, jFS);
  472. }
  473. // Release unnecessary local references
  474. destroyLocalReference(env, jConfiguration);
  475. destroyLocalReference(env, jFS);
  476. destroyLocalReference(env, jURI);
  477. destroyLocalReference(env, jCachePath);
  478. destroyLocalReference(env, jURIString);
  479. destroyLocalReference(env, jUserString);
  480. free(cURI);
  481. free(bld);
  482. if (ret)
  483. errno = ret;
  484. return gFsRef;
  485. }
  486. int hdfsDisconnect(hdfsFS fs)
  487. {
  488. // JAVA EQUIVALENT:
  489. // fs.close()
  490. //Get the JNIEnv* corresponding to current thread
  491. JNIEnv* env = getJNIEnv();
  492. if (env == NULL) {
  493. errno = EINTERNAL;
  494. return -2;
  495. }
  496. //Parameters
  497. jobject jFS = (jobject)fs;
  498. //Caught exception
  499. jthrowable jExc = NULL;
  500. //Sanity check
  501. if (fs == NULL) {
  502. errno = EBADF;
  503. return -1;
  504. }
  505. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  506. "close", "()V") != 0) {
  507. errno = errnoFromException(jExc, env, "Filesystem::close");
  508. return -1;
  509. }
  510. //Release unnecessary references
  511. (*env)->DeleteGlobalRef(env, fs);
  512. return 0;
  513. }
/**
 * Open an HDFS file for reading (O_RDONLY), writing (O_WRONLY), or
 * appending (O_WRONLY|O_APPEND). O_RDWR is not supported.
 *
 * @param path        The path of the file.
 * @param flags       open(2)-style flags; only the modes above are honored.
 * @param bufferSize  I/O buffer size; 0 means use io.file.buffer.size
 *                    from the Configuration (default 4096).
 * @param replication Block replication (writes only); 0 means use
 *                    dfs.replication from the Configuration (default 1).
 * @param blockSize   Block size in bytes (writes only); 0 means use
 *                    dfs.block.size from the Configuration (default 64MB).
 * @return An hdfsFile handle, or NULL on error (errno set).
 */
hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                      int bufferSize, short replication, tSize blockSize)
{
    /*
      JAVA EQUIVALENT:
      File f = new File(path);
      FSData{Input|Output}Stream f{is|os} = fs.create(f);
      return f{is|os};
    */
    /* Get the JNIEnv* corresponding to current thread */
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }
    jobject jFS = (jobject)fs;
    // Read-write handles are not supported by the underlying Java API
    if (flags & O_RDWR) {
        fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
        errno = ENOTSUP;
        return NULL;
    }
    if ((flags & O_CREAT) && (flags & O_EXCL)) {
        fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
    }
    /* The hadoop java api/signature: pick FileSystem#open, #append, or
       #create according to the requested mode */
    const char* method = ((flags & O_WRONLY) == 0) ? "open" : (flags & O_APPEND) ? "append" : "create";
    const char* signature = ((flags & O_WRONLY) == 0) ?
        JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
        (flags & O_APPEND) ?
        JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM)) :
        JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
    /* Return value */
    hdfsFile file = NULL;
    /* Create an object of org.apache.hadoop.fs.Path */
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }
    /* Get the Configuration object from the FileSystem object */
    jvalue jVal;
    jobject jConfiguration = NULL;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getConf", JMETHOD1("", JPARAM(HADOOP_CONF))) != 0) {
        errno = errnoFromException(jExc, env, "get configuration object "
                                   "from filesystem");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jConfiguration = jVal.l;
    jint jBufferSize = bufferSize;
    jshort jReplication = replication;
    jlong jBlockSize = blockSize;
    // Configuration keys consulted when the caller passes 0 for a parameter
    jstring jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
    jstring jStrReplication = (*env)->NewStringUTF(env, "dfs.replication");
    jstring jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");
    //bufferSize: 0 means use the configured default
    if (!bufferSize) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                         HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                         jStrBufferSize, 4096) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "Configuration::getInt");
            goto done;
        }
        jBufferSize = jVal.i;
    }
    // Replication and block size matter only for create (write, not append)
    if ((flags & O_WRONLY) && (flags & O_APPEND) == 0) {
        //replication: 0 means use the configured default
        if (!replication) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                             jStrReplication, 1) != 0) {
                errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                           "Configuration::getInt");
                goto done;
            }
            jReplication = jVal.i;
        }
        //blockSize: 0 means use the configured default (64MB fallback)
        if (!blockSize) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
                             jStrBlockSize, (jlong)67108864)) {
                errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                           "FileSystem::%s(%s)", method,
                                           signature);
                goto done;
            }
            jBlockSize = jVal.j;
        }
    }
    /* Create and return either the FSDataInputStream or
       FSDataOutputStream references jobject jStream */
    // READ?
    if ((flags & O_WRONLY) == 0) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jBufferSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    } else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
        // WRITE/APPEND?
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    } else {
        // WRITE/CREATE: overwrite any existing file
        jboolean jOverWrite = 1;
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jOverWrite,
                         jBufferSize, jReplication, jBlockSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    }
    // Wrap the Java stream in an hdfsFile handle holding a global reference
    file = malloc(sizeof(struct hdfsFile_internal));
    if (!file) {
        errno = ENOMEM;
    } else {
        file->file = (*env)->NewGlobalRef(env, jVal.l);
        file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
        file->flags = 0;
        destroyLocalReference(env, jVal.l);
        if ((flags & O_WRONLY) == 0) {
            // Try a test read to see if we can do direct reads
            errno = 0;
            char buf;
            if (readDirect(fs, file, &buf, 0) == 0) {
                // Success - 0-byte read should return 0
                file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
            } else {
                if (errno != ENOTSUP) {
                    // Unexpected error. Clear it, don't set the direct flag.
                    fprintf(stderr,
                            "WARN: Unexpected error %d when testing "
                            "for direct read compatibility\n", errno);
                    errno = 0;
                    goto done;
                }
            }
            errno = 0;
        }
    }
done:
    //Delete unnecessary local references
    destroyLocalReference(env, jStrBufferSize);
    destroyLocalReference(env, jStrReplication);
    destroyLocalReference(env, jStrBlockSize);
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jPath);
    return file;
}
  675. int hdfsCloseFile(hdfsFS fs, hdfsFile file)
  676. {
  677. // JAVA EQUIVALENT:
  678. // file.close
  679. //Get the JNIEnv* corresponding to current thread
  680. JNIEnv* env = getJNIEnv();
  681. if (env == NULL) {
  682. errno = EINTERNAL;
  683. return -2;
  684. }
  685. //Parameters
  686. jobject jStream = (jobject)(file ? file->file : NULL);
  687. //Caught exception
  688. jthrowable jExc = NULL;
  689. //Sanity check
  690. if (!file || file->type == UNINITIALIZED) {
  691. errno = EBADF;
  692. return -1;
  693. }
  694. //The interface whose 'close' method to be called
  695. const char* interface = (file->type == INPUT) ?
  696. HADOOP_ISTRM : HADOOP_OSTRM;
  697. if (invokeMethod(env, NULL, &jExc, INSTANCE, jStream, interface,
  698. "close", "()V") != 0) {
  699. errno = errnoFromException(jExc, env, "%s::close", interface);
  700. return -1;
  701. }
  702. //De-allocate memory
  703. free(file);
  704. (*env)->DeleteGlobalRef(env, jStream);
  705. return 0;
  706. }
  707. int hdfsExists(hdfsFS fs, const char *path)
  708. {
  709. JNIEnv *env = getJNIEnv();
  710. if (env == NULL) {
  711. errno = EINTERNAL;
  712. return -2;
  713. }
  714. jobject jPath = constructNewObjectOfPath(env, path);
  715. jvalue jVal;
  716. jthrowable jExc = NULL;
  717. jobject jFS = (jobject)fs;
  718. if (jPath == NULL) {
  719. return -1;
  720. }
  721. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  722. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  723. jPath) != 0) {
  724. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  725. "FileSystem::exists");
  726. destroyLocalReference(env, jPath);
  727. return -1;
  728. }
  729. destroyLocalReference(env, jPath);
  730. return jVal.z ? 0 : -1;
  731. }
  732. // Checks input file for readiness for reading.
  733. static int readPrepare(JNIEnv* env, hdfsFS fs, hdfsFile f,
  734. jobject* jInputStream)
  735. {
  736. *jInputStream = (jobject)(f ? f->file : NULL);
  737. //Sanity check
  738. if (!f || f->type == UNINITIALIZED) {
  739. errno = EBADF;
  740. return -1;
  741. }
  742. //Error checking... make sure that this file is 'readable'
  743. if (f->type != INPUT) {
  744. fprintf(stderr, "Cannot read from a non-InputStream object!\n");
  745. errno = EINVAL;
  746. return -1;
  747. }
  748. return 0;
  749. }
  750. // Common error-handling code between read paths.
  751. static int handleReadResult(int success, jvalue jVal, jthrowable jExc,
  752. JNIEnv* env)
  753. {
  754. int noReadBytes;
  755. if (success != 0) {
  756. if ((*env)->ExceptionCheck(env)) {
  757. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  758. "FSDataInputStream::read");
  759. }
  760. noReadBytes = -1;
  761. } else {
  762. noReadBytes = jVal.i;
  763. if (noReadBytes < 0) {
  764. // -1 from Java is EOF, which is 0 here
  765. noReadBytes = 0;
  766. }
  767. errno = 0;
  768. }
  769. return noReadBytes;
  770. }
  771. tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
  772. {
  773. if (f->flags & HDFS_FILE_SUPPORTS_DIRECT_READ) {
  774. return readDirect(fs, f, buffer, length);
  775. }
  776. // JAVA EQUIVALENT:
  777. // byte [] bR = new byte[length];
  778. // fis.read(bR);
  779. //Get the JNIEnv* corresponding to current thread
  780. JNIEnv* env = getJNIEnv();
  781. if (env == NULL) {
  782. errno = EINTERNAL;
  783. return -1;
  784. }
  785. //Parameters
  786. jobject jInputStream;
  787. if (readPrepare(env, fs, f, &jInputStream) == -1) {
  788. return -1;
  789. }
  790. jbyteArray jbRarray;
  791. jint noReadBytes = 0;
  792. jvalue jVal;
  793. jthrowable jExc = NULL;
  794. //Read the requisite bytes
  795. jbRarray = (*env)->NewByteArray(env, length);
  796. int success = invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  797. "read", "([B)I", jbRarray);
  798. noReadBytes = handleReadResult(success, jVal, jExc, env);;
  799. if (noReadBytes > 0) {
  800. (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
  801. }
  802. destroyLocalReference(env, jbRarray);
  803. return noReadBytes;
  804. }
  805. // Reads using the read(ByteBuffer) API, which does fewer copies
  806. tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
  807. {
  808. // JAVA EQUIVALENT:
  809. // ByteBuffer bbuffer = ByteBuffer.allocateDirect(length) // wraps C buffer
  810. // fis.read(bbuffer);
  811. //Get the JNIEnv* corresponding to current thread
  812. JNIEnv* env = getJNIEnv();
  813. if (env == NULL) {
  814. errno = EINTERNAL;
  815. return -1;
  816. }
  817. jobject jInputStream;
  818. if (readPrepare(env, fs, f, &jInputStream) == -1) {
  819. return -1;
  820. }
  821. jint noReadBytes = 0;
  822. jvalue jVal;
  823. jthrowable jExc = NULL;
  824. //Read the requisite bytes
  825. jobject bb = (*env)->NewDirectByteBuffer(env, buffer, length);
  826. if (bb == NULL) {
  827. fprintf(stderr, "Could not allocate ByteBuffer");
  828. if ((*env)->ExceptionCheck(env)) {
  829. errno = errnoFromException(NULL, env, "JNIEnv::NewDirectByteBuffer");
  830. } else {
  831. errno = ENOMEM; // Best guess if there's no exception waiting
  832. }
  833. return -1;
  834. }
  835. int success = invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
  836. HADOOP_ISTRM, "read", "(Ljava/nio/ByteBuffer;)I",
  837. bb);
  838. noReadBytes = handleReadResult(success, jVal, jExc, env);
  839. destroyLocalReference(env, bb);
  840. return noReadBytes;
  841. }
  842. tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
  843. void* buffer, tSize length)
  844. {
  845. // JAVA EQUIVALENT:
  846. // byte [] bR = new byte[length];
  847. // fis.read(pos, bR, 0, length);
  848. //Get the JNIEnv* corresponding to current thread
  849. JNIEnv* env = getJNIEnv();
  850. if (env == NULL) {
  851. errno = EINTERNAL;
  852. return -1;
  853. }
  854. //Parameters
  855. jobject jInputStream = (jobject)(f ? f->file : NULL);
  856. jbyteArray jbRarray;
  857. jint noReadBytes = 0;
  858. jvalue jVal;
  859. jthrowable jExc = NULL;
  860. //Sanity check
  861. if (!f || f->type == UNINITIALIZED) {
  862. errno = EBADF;
  863. return -1;
  864. }
  865. //Error checking... make sure that this file is 'readable'
  866. if (f->type != INPUT) {
  867. fprintf(stderr, "Cannot read from a non-InputStream object!\n");
  868. errno = EINVAL;
  869. return -1;
  870. }
  871. //Read the requisite bytes
  872. jbRarray = (*env)->NewByteArray(env, length);
  873. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  874. "read", "(J[BII)I", position, jbRarray, 0, length) != 0) {
  875. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  876. "FSDataInputStream::read");
  877. noReadBytes = -1;
  878. }
  879. else {
  880. noReadBytes = jVal.i;
  881. if (noReadBytes > 0) {
  882. (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
  883. } else {
  884. //This is a valid case: there aren't any bytes left to read!
  885. if (noReadBytes == 0 || noReadBytes < -1) {
  886. fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes);
  887. }
  888. noReadBytes = 0;
  889. }
  890. errno = 0;
  891. }
  892. destroyLocalReference(env, jbRarray);
  893. return noReadBytes;
  894. }
  895. tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
  896. {
  897. // JAVA EQUIVALENT
  898. // byte b[] = str.getBytes();
  899. // fso.write(b);
  900. //Get the JNIEnv* corresponding to current thread
  901. JNIEnv* env = getJNIEnv();
  902. if (env == NULL) {
  903. errno = EINTERNAL;
  904. return -1;
  905. }
  906. //Parameters
  907. jobject jOutputStream = (jobject)(f ? f->file : 0);
  908. jbyteArray jbWarray;
  909. //Caught exception
  910. jthrowable jExc = NULL;
  911. //Sanity check
  912. if (!f || f->type == UNINITIALIZED) {
  913. errno = EBADF;
  914. return -1;
  915. }
  916. if (length < 0) {
  917. errno = EINVAL;
  918. return -1;
  919. }
  920. //Error checking... make sure that this file is 'writable'
  921. if (f->type != OUTPUT) {
  922. fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
  923. errno = EINVAL;
  924. return -1;
  925. }
  926. // 'length' equals 'zero' is a valid use-case according to Posix!
  927. if (length != 0) {
  928. //Write the requisite bytes into the file
  929. jbWarray = (*env)->NewByteArray(env, length);
  930. (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
  931. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  932. HADOOP_OSTRM, "write",
  933. "([B)V", jbWarray) != 0) {
  934. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  935. "FSDataOutputStream::write");
  936. length = -1;
  937. }
  938. destroyLocalReference(env, jbWarray);
  939. }
  940. //Return no. of bytes succesfully written (libc way)
  941. //i.e. 'length' itself! ;-)
  942. return length;
  943. }
  944. int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
  945. {
  946. // JAVA EQUIVALENT
  947. // fis.seek(pos);
  948. //Get the JNIEnv* corresponding to current thread
  949. JNIEnv* env = getJNIEnv();
  950. if (env == NULL) {
  951. errno = EINTERNAL;
  952. return -1;
  953. }
  954. //Parameters
  955. jobject jInputStream = (jobject)(f ? f->file : 0);
  956. //Caught exception
  957. jthrowable jExc = NULL;
  958. //Sanity check
  959. if (!f || f->type != INPUT) {
  960. errno = EBADF;
  961. return -1;
  962. }
  963. if (invokeMethod(env, NULL, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  964. "seek", "(J)V", desiredPos) != 0) {
  965. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  966. "FSDataInputStream::seek");
  967. return -1;
  968. }
  969. return 0;
  970. }
  971. tOffset hdfsTell(hdfsFS fs, hdfsFile f)
  972. {
  973. // JAVA EQUIVALENT
  974. // pos = f.getPos();
  975. //Get the JNIEnv* corresponding to current thread
  976. JNIEnv* env = getJNIEnv();
  977. if (env == NULL) {
  978. errno = EINTERNAL;
  979. return -1;
  980. }
  981. //Parameters
  982. jobject jStream = (jobject)(f ? f->file : 0);
  983. //Sanity check
  984. if (!f || f->type == UNINITIALIZED) {
  985. errno = EBADF;
  986. return -1;
  987. }
  988. const char* interface = (f->type == INPUT) ?
  989. HADOOP_ISTRM : HADOOP_OSTRM;
  990. jlong currentPos = -1;
  991. jvalue jVal;
  992. jthrowable jExc = NULL;
  993. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStream,
  994. interface, "getPos", "()J") != 0) {
  995. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  996. "FSDataInputStream::getPos");
  997. return -1;
  998. }
  999. currentPos = jVal.j;
  1000. return (tOffset)currentPos;
  1001. }
  1002. int hdfsFlush(hdfsFS fs, hdfsFile f)
  1003. {
  1004. // JAVA EQUIVALENT
  1005. // fos.flush();
  1006. //Get the JNIEnv* corresponding to current thread
  1007. JNIEnv* env = getJNIEnv();
  1008. if (env == NULL) {
  1009. errno = EINTERNAL;
  1010. return -1;
  1011. }
  1012. //Parameters
  1013. jobject jOutputStream = (jobject)(f ? f->file : 0);
  1014. //Caught exception
  1015. jthrowable jExc = NULL;
  1016. //Sanity check
  1017. if (!f || f->type != OUTPUT) {
  1018. errno = EBADF;
  1019. return -1;
  1020. }
  1021. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  1022. HADOOP_OSTRM, "flush", "()V") != 0) {
  1023. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1024. "FSDataInputStream::flush");
  1025. return -1;
  1026. }
  1027. return 0;
  1028. }
  1029. int hdfsHFlush(hdfsFS fs, hdfsFile f)
  1030. {
  1031. //Get the JNIEnv* corresponding to current thread
  1032. JNIEnv* env = getJNIEnv();
  1033. if (env == NULL) {
  1034. errno = EINTERNAL;
  1035. return -1;
  1036. }
  1037. //Parameters
  1038. jobject jOutputStream = (jobject)(f ? f->file : 0);
  1039. //Caught exception
  1040. jthrowable jExc = NULL;
  1041. //Sanity check
  1042. if (!f || f->type != OUTPUT) {
  1043. errno = EBADF;
  1044. return -1;
  1045. }
  1046. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  1047. HADOOP_OSTRM, "hflush", "()V") != 0) {
  1048. errno = errnoFromException(jExc, env, HADOOP_OSTRM "::hflush");
  1049. return -1;
  1050. }
  1051. return 0;
  1052. }
  1053. int hdfsAvailable(hdfsFS fs, hdfsFile f)
  1054. {
  1055. // JAVA EQUIVALENT
  1056. // fis.available();
  1057. //Get the JNIEnv* corresponding to current thread
  1058. JNIEnv* env = getJNIEnv();
  1059. if (env == NULL) {
  1060. errno = EINTERNAL;
  1061. return -1;
  1062. }
  1063. //Parameters
  1064. jobject jInputStream = (jobject)(f ? f->file : 0);
  1065. //Caught exception
  1066. jthrowable jExc = NULL;
  1067. //Sanity check
  1068. if (!f || f->type != INPUT) {
  1069. errno = EBADF;
  1070. return -1;
  1071. }
  1072. jint available = -1;
  1073. jvalue jVal;
  1074. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
  1075. HADOOP_ISTRM, "available", "()I") != 0) {
  1076. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1077. "FSDataInputStream::available");
  1078. return -1;
  1079. }
  1080. available = jVal.i;
  1081. return available;
  1082. }
int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
    //                 deleteSource = false, conf)
    //
    // Copies 'src' on 'srcFS' to 'dst' on 'dstFS', leaving the source in
    // place. Returns 0 on success, -1 on failure with errno set.
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }
    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;
    jobject jSrcPath = NULL;
    jobject jDstPath = NULL;
    jSrcPath = constructNewObjectOfPath(env, src);
    if (jSrcPath == NULL) {
        return -1;
    }
    jDstPath = constructNewObjectOfPath(env, dst);
    if (jDstPath == NULL) {
        destroyLocalReference(env, jSrcPath);
        return -1;
    }
    int retval = 0;
    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        destroyLocalReference(env, jSrcPath);
        destroyLocalReference(env, jDstPath);
        return -1;
    }
    //FileUtil::copy (static)
    jboolean deleteSource = 0; //Only copy
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, STATIC,
                     NULL, "org/apache/hadoop/fs/FileUtil", "copy",
                     "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
                     jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                     jConfiguration) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileUtil::copy");
        retval = -1;
        goto done;
    }
    // NOTE(review): FileUtil.copy's boolean result lands in jVal.z but is
    // never consulted — a 'false' return still yields 0 here. Confirm
    // whether that is intended.
done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);
    return retval;
}
int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
    //                 deleteSource = true, conf)
    //
    // Moves 'src' on 'srcFS' to 'dst' on 'dstFS' (copy, then delete the
    // source). Returns 0 on success, -1 on failure with errno set.
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }
    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;
    jobject jSrcPath = NULL;
    jobject jDstPath = NULL;
    jSrcPath = constructNewObjectOfPath(env, src);
    if (jSrcPath == NULL) {
        return -1;
    }
    jDstPath = constructNewObjectOfPath(env, dst);
    if (jDstPath == NULL) {
        destroyLocalReference(env, jSrcPath);
        return -1;
    }
    int retval = 0;
    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        destroyLocalReference(env, jSrcPath);
        destroyLocalReference(env, jDstPath);
        return -1;
    }
    //FileUtil::copy (static); deleteSource=1 makes it a move
    jboolean deleteSource = 1; //Delete src after copy
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                     "org/apache/hadoop/fs/FileUtil", "copy",
                     "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
                     jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                     jConfiguration) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileUtil::copy(move)");
        retval = -1;
        goto done;
    }
    // NOTE(review): as in hdfsCopy, the boolean result in jVal.z is not
    // consulted — a 'false' return still yields 0. Confirm intended.
done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);
    return retval;
}
  1199. int hdfsDelete(hdfsFS fs, const char* path, int recursive)
  1200. {
  1201. // JAVA EQUIVALENT:
  1202. // File f = new File(path);
  1203. // bool retval = fs.delete(f);
  1204. //Get the JNIEnv* corresponding to current thread
  1205. JNIEnv* env = getJNIEnv();
  1206. if (env == NULL) {
  1207. errno = EINTERNAL;
  1208. return -1;
  1209. }
  1210. jobject jFS = (jobject)fs;
  1211. //Create an object of java.io.File
  1212. jobject jPath = constructNewObjectOfPath(env, path);
  1213. if (jPath == NULL) {
  1214. return -1;
  1215. }
  1216. //Delete the file
  1217. jvalue jVal;
  1218. jthrowable jExc = NULL;
  1219. jboolean jRecursive = recursive;
  1220. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1221. "delete", "(Lorg/apache/hadoop/fs/Path;Z)Z",
  1222. jPath, jRecursive) != 0) {
  1223. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1224. "FileSystem::delete");
  1225. destroyLocalReference(env, jPath);
  1226. return -1;
  1227. }
  1228. //Delete unnecessary local references
  1229. destroyLocalReference(env, jPath);
  1230. return (jVal.z) ? 0 : -1;
  1231. }
  1232. int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
  1233. {
  1234. // JAVA EQUIVALENT:
  1235. // Path old = new Path(oldPath);
  1236. // Path new = new Path(newPath);
  1237. // fs.rename(old, new);
  1238. //Get the JNIEnv* corresponding to current thread
  1239. JNIEnv* env = getJNIEnv();
  1240. if (env == NULL) {
  1241. errno = EINTERNAL;
  1242. return -1;
  1243. }
  1244. jobject jFS = (jobject)fs;
  1245. //Create objects of org.apache.hadoop.fs.Path
  1246. jobject jOldPath = NULL;
  1247. jobject jNewPath = NULL;
  1248. jOldPath = constructNewObjectOfPath(env, oldPath);
  1249. if (jOldPath == NULL) {
  1250. return -1;
  1251. }
  1252. jNewPath = constructNewObjectOfPath(env, newPath);
  1253. if (jNewPath == NULL) {
  1254. destroyLocalReference(env, jOldPath);
  1255. return -1;
  1256. }
  1257. //Rename the file
  1258. jvalue jVal;
  1259. jthrowable jExc = NULL;
  1260. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "rename",
  1261. JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
  1262. jOldPath, jNewPath) != 0) {
  1263. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1264. "FileSystem::rename");
  1265. destroyLocalReference(env, jOldPath);
  1266. destroyLocalReference(env, jNewPath);
  1267. return -1;
  1268. }
  1269. //Delete unnecessary local references
  1270. destroyLocalReference(env, jOldPath);
  1271. destroyLocalReference(env, jNewPath);
  1272. return (jVal.z) ? 0 : -1;
  1273. }
  1274. char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
  1275. {
  1276. // JAVA EQUIVALENT:
  1277. // Path p = fs.getWorkingDirectory();
  1278. // return p.toString()
  1279. //Get the JNIEnv* corresponding to current thread
  1280. JNIEnv* env = getJNIEnv();
  1281. if (env == NULL) {
  1282. errno = EINTERNAL;
  1283. return NULL;
  1284. }
  1285. jobject jFS = (jobject)fs;
  1286. jobject jPath = NULL;
  1287. jvalue jVal;
  1288. jthrowable jExc = NULL;
  1289. //FileSystem::getWorkingDirectory()
  1290. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
  1291. HADOOP_FS, "getWorkingDirectory",
  1292. "()Lorg/apache/hadoop/fs/Path;") != 0 ||
  1293. jVal.l == NULL) {
  1294. errno = errnoFromException(jExc, env, "FileSystem::"
  1295. "getWorkingDirectory");
  1296. return NULL;
  1297. }
  1298. jPath = jVal.l;
  1299. //Path::toString()
  1300. jstring jPathString;
  1301. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath,
  1302. "org/apache/hadoop/fs/Path", "toString",
  1303. "()Ljava/lang/String;") != 0) {
  1304. errno = errnoFromException(jExc, env, "Path::toString");
  1305. destroyLocalReference(env, jPath);
  1306. return NULL;
  1307. }
  1308. jPathString = jVal.l;
  1309. const char *jPathChars = (const char*)
  1310. ((*env)->GetStringUTFChars(env, jPathString, NULL));
  1311. //Copy to user-provided buffer
  1312. strncpy(buffer, jPathChars, bufferSize);
  1313. //Delete unnecessary local references
  1314. (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars);
  1315. destroyLocalReference(env, jPathString);
  1316. destroyLocalReference(env, jPath);
  1317. return buffer;
  1318. }
  1319. int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
  1320. {
  1321. // JAVA EQUIVALENT:
  1322. // fs.setWorkingDirectory(Path(path));
  1323. //Get the JNIEnv* corresponding to current thread
  1324. JNIEnv* env = getJNIEnv();
  1325. if (env == NULL) {
  1326. errno = EINTERNAL;
  1327. return -1;
  1328. }
  1329. jobject jFS = (jobject)fs;
  1330. int retval = 0;
  1331. jthrowable jExc = NULL;
  1332. //Create an object of org.apache.hadoop.fs.Path
  1333. jobject jPath = constructNewObjectOfPath(env, path);
  1334. if (jPath == NULL) {
  1335. return -1;
  1336. }
  1337. //FileSystem::setWorkingDirectory()
  1338. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1339. "setWorkingDirectory",
  1340. "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
  1341. errno = errnoFromException(jExc, env, "FileSystem::"
  1342. "setWorkingDirectory");
  1343. retval = -1;
  1344. }
  1345. //Delete unnecessary local references
  1346. destroyLocalReference(env, jPath);
  1347. return retval;
  1348. }
  1349. int hdfsCreateDirectory(hdfsFS fs, const char* path)
  1350. {
  1351. // JAVA EQUIVALENT:
  1352. // fs.mkdirs(new Path(path));
  1353. //Get the JNIEnv* corresponding to current thread
  1354. JNIEnv* env = getJNIEnv();
  1355. if (env == NULL) {
  1356. errno = EINTERNAL;
  1357. return -1;
  1358. }
  1359. jobject jFS = (jobject)fs;
  1360. //Create an object of org.apache.hadoop.fs.Path
  1361. jobject jPath = constructNewObjectOfPath(env, path);
  1362. if (jPath == NULL) {
  1363. return -1;
  1364. }
  1365. //Create the directory
  1366. jvalue jVal;
  1367. jVal.z = 0;
  1368. jthrowable jExc = NULL;
  1369. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1370. "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
  1371. jPath) != 0) {
  1372. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1373. "FileSystem::mkdirs");
  1374. goto done;
  1375. }
  1376. done:
  1377. //Delete unnecessary local references
  1378. destroyLocalReference(env, jPath);
  1379. return (jVal.z) ? 0 : -1;
  1380. }
  1381. int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
  1382. {
  1383. // JAVA EQUIVALENT:
  1384. // fs.setReplication(new Path(path), replication);
  1385. //Get the JNIEnv* corresponding to current thread
  1386. JNIEnv* env = getJNIEnv();
  1387. if (env == NULL) {
  1388. errno = EINTERNAL;
  1389. return -1;
  1390. }
  1391. jobject jFS = (jobject)fs;
  1392. //Create an object of org.apache.hadoop.fs.Path
  1393. jobject jPath = constructNewObjectOfPath(env, path);
  1394. if (jPath == NULL) {
  1395. return -1;
  1396. }
  1397. //Create the directory
  1398. jvalue jVal;
  1399. jthrowable jExc = NULL;
  1400. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1401. "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
  1402. jPath, replication) != 0) {
  1403. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1404. "FileSystem::setReplication");
  1405. goto done;
  1406. }
  1407. done:
  1408. //Delete unnecessary local references
  1409. destroyLocalReference(env, jPath);
  1410. return (jVal.z) ? 0 : -1;
  1411. }
int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
{
    // JAVA EQUIVALENT:
    //  fs.setOwner(path, owner, group)
    //
    // Changes the owner and/or group of 'path'. The code rejects the case
    // where both are NULL; presumably a single NULL means "leave that
    // attribute unchanged" — verify against FileSystem.setOwner's contract.
    // Returns 0 on success, -1 on failure with errno set.
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }
    if (owner == NULL && group == NULL) {
        fprintf(stderr, "Both owner and group cannot be null in chown");
        errno = EINVAL;
        return -1;
    }
    jobject jFS = (jobject)fs;
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }
    // NOTE(review): when owner/group is NULL this relies on
    // NewStringUTF(NULL) yielding a NULL jstring — the JNI spec does not
    // guarantee that; confirm on the supported JVMs. Allocation failure
    // (OOM) is also not checked here.
    jstring jOwnerString = (*env)->NewStringUTF(env, owner);
    jstring jGroupString = (*env)->NewStringUTF(env, group);
    //FileSystem::setOwner
    int ret = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setOwner", JMETHOD3(JPARAM(HADOOP_PATH), JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
                     jPath, jOwnerString, jGroupString) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setOwner");
        ret = -1;
        goto done;
    }
done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jOwnerString);
    destroyLocalReference(env, jGroupString);
    return ret;
}
  1451. int hdfsChmod(hdfsFS fs, const char* path, short mode)
  1452. {
  1453. // JAVA EQUIVALENT:
  1454. // fs.setPermission(path, FsPermission)
  1455. //Get the JNIEnv* corresponding to current thread
  1456. JNIEnv* env = getJNIEnv();
  1457. if (env == NULL) {
  1458. errno = EINTERNAL;
  1459. return -1;
  1460. }
  1461. jobject jFS = (jobject)fs;
  1462. // construct jPerm = FsPermission.createImmutable(short mode);
  1463. jshort jmode = mode;
  1464. jobject jPermObj =
  1465. constructNewObjectOfClass(env, NULL, HADOOP_FSPERM,"(S)V",jmode);
  1466. if (jPermObj == NULL) {
  1467. return -2;
  1468. }
  1469. //Create an object of org.apache.hadoop.fs.Path
  1470. jobject jPath = constructNewObjectOfPath(env, path);
  1471. if (jPath == NULL) {
  1472. destroyLocalReference(env, jPermObj);
  1473. return -3;
  1474. }
  1475. //Create the directory
  1476. int ret = 0;
  1477. jthrowable jExc = NULL;
  1478. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1479. "setPermission", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
  1480. jPath, jPermObj) != 0) {
  1481. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1482. "FileSystem::setPermission");
  1483. ret = -1;
  1484. goto done;
  1485. }
  1486. done:
  1487. destroyLocalReference(env, jPath);
  1488. destroyLocalReference(env, jPermObj);
  1489. return ret;
  1490. }
  1491. int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
  1492. {
  1493. // JAVA EQUIVALENT:
  1494. // fs.setTimes(src, mtime, atime)
  1495. //Get the JNIEnv* corresponding to current thread
  1496. JNIEnv* env = getJNIEnv();
  1497. if (env == NULL) {
  1498. errno = EINTERNAL;
  1499. return -1;
  1500. }
  1501. jobject jFS = (jobject)fs;
  1502. //Create an object of org.apache.hadoop.fs.Path
  1503. jobject jPath = constructNewObjectOfPath(env, path);
  1504. if (jPath == NULL) {
  1505. fprintf(stderr, "could not construct path object\n");
  1506. return -2;
  1507. }
  1508. const tTime NO_CHANGE = -1;
  1509. jlong jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
  1510. jlong jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
  1511. int ret = 0;
  1512. jthrowable jExc = NULL;
  1513. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1514. "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
  1515. jPath, jmtime, jatime) != 0) {
  1516. fprintf(stderr, "call to setTime failed\n");
  1517. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1518. "FileSystem::setTimes");
  1519. ret = -1;
  1520. goto done;
  1521. }
  1522. done:
  1523. destroyLocalReference(env, jPath);
  1524. return ret;
  1525. }
  1526. char***
  1527. hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
  1528. {
  1529. // JAVA EQUIVALENT:
  1530. // fs.getFileBlockLoctions(new Path(path), start, length);
  1531. //Get the JNIEnv* corresponding to current thread
  1532. JNIEnv* env = getJNIEnv();
  1533. if (env == NULL) {
  1534. errno = EINTERNAL;
  1535. return NULL;
  1536. }
  1537. jobject jFS = (jobject)fs;
  1538. //Create an object of org.apache.hadoop.fs.Path
  1539. jobject jPath = constructNewObjectOfPath(env, path);
  1540. if (jPath == NULL) {
  1541. return NULL;
  1542. }
  1543. jvalue jFSVal;
  1544. jthrowable jFSExc = NULL;
  1545. if (invokeMethod(env, &jFSVal, &jFSExc, INSTANCE, jFS,
  1546. HADOOP_FS, "getFileStatus",
  1547. "(Lorg/apache/hadoop/fs/Path;)"
  1548. "Lorg/apache/hadoop/fs/FileStatus;",
  1549. jPath) != 0) {
  1550. errno = errnoFromException(jFSExc, env, "org.apache.hadoop.fs."
  1551. "FileSystem::getFileStatus");
  1552. destroyLocalReference(env, jPath);
  1553. return NULL;
  1554. }
  1555. jobject jFileStatus = jFSVal.l;
  1556. //org.apache.hadoop.fs.FileSystem::getFileBlockLocations
  1557. char*** blockHosts = NULL;
  1558. jobjectArray jBlockLocations;;
  1559. jvalue jVal;
  1560. jthrowable jExc = NULL;
  1561. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
  1562. HADOOP_FS, "getFileBlockLocations",
  1563. "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
  1564. "[Lorg/apache/hadoop/fs/BlockLocation;",
  1565. jFileStatus, start, length) != 0) {
  1566. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1567. "FileSystem::getFileBlockLocations");
  1568. destroyLocalReference(env, jPath);
  1569. destroyLocalReference(env, jFileStatus);
  1570. return NULL;
  1571. }
  1572. jBlockLocations = jVal.l;
  1573. //Figure out no of entries in jBlockLocations
  1574. //Allocate memory and add NULL at the end
  1575. jsize jNumFileBlocks = (*env)->GetArrayLength(env, jBlockLocations);
  1576. blockHosts = malloc(sizeof(char**) * (jNumFileBlocks+1));
  1577. if (blockHosts == NULL) {
  1578. errno = ENOMEM;
  1579. goto done;
  1580. }
  1581. blockHosts[jNumFileBlocks] = NULL;
  1582. if (jNumFileBlocks == 0) {
  1583. errno = 0;
  1584. goto done;
  1585. }
  1586. //Now parse each block to get hostnames
  1587. int i = 0;
  1588. for (i=0; i < jNumFileBlocks; ++i) {
  1589. jobject jFileBlock =
  1590. (*env)->GetObjectArrayElement(env, jBlockLocations, i);
  1591. jvalue jVal;
  1592. jobjectArray jFileBlockHosts;
  1593. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
  1594. "getHosts", "()[Ljava/lang/String;") ||
  1595. jVal.l == NULL) {
  1596. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1597. "BlockLocation::getHosts");
  1598. destroyLocalReference(env, jPath);
  1599. destroyLocalReference(env, jFileStatus);
  1600. destroyLocalReference(env, jBlockLocations);
  1601. return NULL;
  1602. }
  1603. jFileBlockHosts = jVal.l;
  1604. //Figure out no of hosts in jFileBlockHosts
  1605. //Allocate memory and add NULL at the end
  1606. jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
  1607. blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts+1));
  1608. if (blockHosts[i] == NULL) {
  1609. int x = 0;
  1610. for (x=0; x < i; ++x) {
  1611. free(blockHosts[x]);
  1612. }
  1613. free(blockHosts);
  1614. errno = ENOMEM;
  1615. goto done;
  1616. }
  1617. blockHosts[i][jNumBlockHosts] = NULL;
  1618. //Now parse each hostname
  1619. int j = 0;
  1620. const char *hostName;
  1621. for (j=0; j < jNumBlockHosts; ++j) {
  1622. jstring jHost =
  1623. (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
  1624. hostName =
  1625. (const char*)((*env)->GetStringUTFChars(env, jHost, NULL));
  1626. blockHosts[i][j] = strdup(hostName);
  1627. (*env)->ReleaseStringUTFChars(env, jHost, hostName);
  1628. destroyLocalReference(env, jHost);
  1629. }
  1630. destroyLocalReference(env, jFileBlockHosts);
  1631. }
  1632. done:
  1633. //Delete unnecessary local references
  1634. destroyLocalReference(env, jPath);
  1635. destroyLocalReference(env, jFileStatus);
  1636. destroyLocalReference(env, jBlockLocations);
  1637. return blockHosts;
  1638. }
  1639. void hdfsFreeHosts(char ***blockHosts)
  1640. {
  1641. int i, j;
  1642. for (i=0; blockHosts[i]; i++) {
  1643. for (j=0; blockHosts[i][j]; j++) {
  1644. free(blockHosts[i][j]);
  1645. }
  1646. free(blockHosts[i]);
  1647. }
  1648. free(blockHosts);
  1649. }
  1650. tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
  1651. {
  1652. // JAVA EQUIVALENT:
  1653. // fs.getDefaultBlockSize();
  1654. //Get the JNIEnv* corresponding to current thread
  1655. JNIEnv* env = getJNIEnv();
  1656. if (env == NULL) {
  1657. errno = EINTERNAL;
  1658. return -1;
  1659. }
  1660. jobject jFS = (jobject)fs;
  1661. //FileSystem::getDefaultBlockSize()
  1662. tOffset blockSize = -1;
  1663. jvalue jVal;
  1664. jthrowable jExc = NULL;
  1665. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1666. "getDefaultBlockSize", "()J") != 0) {
  1667. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1668. "FileSystem::getDefaultBlockSize");
  1669. return -1;
  1670. }
  1671. blockSize = jVal.j;
  1672. return blockSize;
  1673. }
  1674. tOffset hdfsGetCapacity(hdfsFS fs)
  1675. {
  1676. // JAVA EQUIVALENT:
  1677. // FsStatus fss = fs.getStatus();
  1678. // return Fss.getCapacity();
  1679. //Get the JNIEnv* corresponding to current thread
  1680. JNIEnv* env = getJNIEnv();
  1681. if (env == NULL) {
  1682. errno = EINTERNAL;
  1683. return -1;
  1684. }
  1685. jobject jFS = (jobject)fs;
  1686. //FileSystem::getStatus
  1687. jvalue jVal;
  1688. jthrowable jExc = NULL;
  1689. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1690. "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;") != 0) {
  1691. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1692. "FileSystem::getStatus");
  1693. return -1;
  1694. }
  1695. jobject fss = (jobject)jVal.l;
  1696. if (invokeMethod(env, &jVal, &jExc, INSTANCE, fss, HADOOP_FSSTATUS,
  1697. "getCapacity", "()J") != 0) {
  1698. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1699. "FsStatus::getCapacity");
  1700. destroyLocalReference(env, fss);
  1701. return -1;
  1702. }
  1703. destroyLocalReference(env, fss);
  1704. return jVal.j;
  1705. }
  1706. tOffset hdfsGetUsed(hdfsFS fs)
  1707. {
  1708. // JAVA EQUIVALENT:
  1709. // FsStatus fss = fs.getStatus();
  1710. // return Fss.getUsed();
  1711. //Get the JNIEnv* corresponding to current thread
  1712. JNIEnv* env = getJNIEnv();
  1713. if (env == NULL) {
  1714. errno = EINTERNAL;
  1715. return -1;
  1716. }
  1717. jobject jFS = (jobject)fs;
  1718. //FileSystem::getStatus
  1719. jvalue jVal;
  1720. jthrowable jExc = NULL;
  1721. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1722. "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;") != 0) {
  1723. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1724. "FileSystem::getStatus");
  1725. return -1;
  1726. }
  1727. jobject fss = (jobject)jVal.l;
  1728. if (invokeMethod(env, &jVal, &jExc, INSTANCE, fss, HADOOP_FSSTATUS,
  1729. "getUsed", "()J") != 0) {
  1730. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1731. "FsStatus::getUsed");
  1732. destroyLocalReference(env, fss);
  1733. return -1;
  1734. }
  1735. destroyLocalReference(env, fss);
  1736. return jVal.j;
  1737. }
  1738. static int
  1739. getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
  1740. {
  1741. jvalue jVal;
  1742. jthrowable jExc = NULL;
  1743. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1744. HADOOP_STAT, "isDir", "()Z") != 0) {
  1745. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1746. "FileStatus::isDir");
  1747. return -1;
  1748. }
  1749. fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
  1750. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1751. HADOOP_STAT, "getReplication", "()S") != 0) {
  1752. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1753. "FileStatus::getReplication");
  1754. return -1;
  1755. }
  1756. fileInfo->mReplication = jVal.s;
  1757. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1758. HADOOP_STAT, "getBlockSize", "()J") != 0) {
  1759. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1760. "FileStatus::getBlockSize");
  1761. return -1;
  1762. }
  1763. fileInfo->mBlockSize = jVal.j;
  1764. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1765. HADOOP_STAT, "getModificationTime", "()J") != 0) {
  1766. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1767. "FileStatus::getModificationTime");
  1768. return -1;
  1769. }
  1770. fileInfo->mLastMod = (tTime) (jVal.j / 1000);
  1771. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1772. HADOOP_STAT, "getAccessTime", "()J") != 0) {
  1773. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1774. "FileStatus::getAccessTime");
  1775. return -1;
  1776. }
  1777. fileInfo->mLastAccess = (tTime) (jVal.j / 1000);
  1778. if (fileInfo->mKind == kObjectKindFile) {
  1779. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1780. HADOOP_STAT, "getLen", "()J") != 0) {
  1781. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1782. "FileStatus::getLen");
  1783. return -1;
  1784. }
  1785. fileInfo->mSize = jVal.j;
  1786. }
  1787. jobject jPath;
  1788. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
  1789. "getPath", "()Lorg/apache/hadoop/fs/Path;") ||
  1790. jVal.l == NULL) {
  1791. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1792. "Path::getPath");
  1793. return -1;
  1794. }
  1795. jPath = jVal.l;
  1796. jstring jPathName;
  1797. const char *cPathName;
  1798. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, HADOOP_PATH,
  1799. "toString", "()Ljava/lang/String;")) {
  1800. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1801. "Path::toString");
  1802. destroyLocalReference(env, jPath);
  1803. return -1;
  1804. }
  1805. jPathName = jVal.l;
  1806. cPathName = (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
  1807. fileInfo->mName = strdup(cPathName);
  1808. (*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
  1809. destroyLocalReference(env, jPath);
  1810. destroyLocalReference(env, jPathName);
  1811. jstring jUserName;
  1812. const char* cUserName;
  1813. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
  1814. "getOwner", "()Ljava/lang/String;")) {
  1815. fprintf(stderr, "Call to org.apache.hadoop.fs."
  1816. "FileStatus::getOwner failed!\n");
  1817. errno = EINTERNAL;
  1818. return -1;
  1819. }
  1820. jUserName = jVal.l;
  1821. cUserName = (const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
  1822. fileInfo->mOwner = strdup(cUserName);
  1823. (*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
  1824. destroyLocalReference(env, jUserName);
  1825. jstring jGroupName;
  1826. const char* cGroupName;
  1827. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
  1828. "getGroup", "()Ljava/lang/String;")) {
  1829. fprintf(stderr, "Call to org.apache.hadoop.fs."
  1830. "FileStatus::getGroup failed!\n");
  1831. errno = EINTERNAL;
  1832. return -1;
  1833. }
  1834. jGroupName = jVal.l;
  1835. cGroupName = (const char*) ((*env)->GetStringUTFChars(env, jGroupName, NULL));
  1836. fileInfo->mGroup = strdup(cGroupName);
  1837. (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
  1838. destroyLocalReference(env, jGroupName);
  1839. jobject jPermission;
  1840. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
  1841. "getPermission", "()Lorg/apache/hadoop/fs/permission/FsPermission;") ||
  1842. jVal.l == NULL) {
  1843. fprintf(stderr, "Call to org.apache.hadoop.fs."
  1844. "FileStatus::getPermission failed!\n");
  1845. errno = EINTERNAL;
  1846. return -1;
  1847. }
  1848. jPermission = jVal.l;
  1849. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPermission, HADOOP_FSPERM,
  1850. "toShort", "()S") != 0) {
  1851. fprintf(stderr, "Call to org.apache.hadoop.fs.permission."
  1852. "FsPermission::toShort failed!\n");
  1853. errno = EINTERNAL;
  1854. return -1;
  1855. }
  1856. fileInfo->mPermissions = jVal.s;
  1857. destroyLocalReference(env, jPermission);
  1858. return 0;
  1859. }
  1860. static int
  1861. getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo *fileInfo)
  1862. {
  1863. // JAVA EQUIVALENT:
  1864. // fs.isDirectory(f)
  1865. // fs.getModificationTime()
  1866. // fs.getAccessTime()
  1867. // fs.getLength(f)
  1868. // f.getPath()
  1869. // f.getOwner()
  1870. // f.getGroup()
  1871. // f.getPermission().toShort()
  1872. jobject jStat;
  1873. jvalue jVal;
  1874. jthrowable jExc = NULL;
  1875. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1876. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  1877. jPath) != 0) {
  1878. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1879. "FileSystem::exists");
  1880. return -1;
  1881. }
  1882. if (jVal.z == 0) {
  1883. errno = ENOENT;
  1884. return -1;
  1885. }
  1886. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1887. "getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)),
  1888. jPath) != 0) {
  1889. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1890. "FileSystem::getFileStatus");
  1891. return -1;
  1892. }
  1893. jStat = jVal.l;
  1894. int ret = getFileInfoFromStat(env, jStat, fileInfo);
  1895. destroyLocalReference(env, jStat);
  1896. return ret;
  1897. }
  1898. hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
  1899. {
  1900. // JAVA EQUIVALENT:
  1901. // Path p(path);
  1902. // Path []pathList = fs.listPaths(p)
  1903. // foreach path in pathList
  1904. // getFileInfo(path)
  1905. //Get the JNIEnv* corresponding to current thread
  1906. JNIEnv* env = getJNIEnv();
  1907. if (env == NULL) {
  1908. errno = EINTERNAL;
  1909. return NULL;
  1910. }
  1911. jobject jFS = (jobject)fs;
  1912. //Create an object of org.apache.hadoop.fs.Path
  1913. jobject jPath = constructNewObjectOfPath(env, path);
  1914. if (jPath == NULL) {
  1915. return NULL;
  1916. }
  1917. hdfsFileInfo *pathList = 0;
  1918. jobjectArray jPathList = NULL;
  1919. jvalue jVal;
  1920. jthrowable jExc = NULL;
  1921. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS, "listStatus",
  1922. JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
  1923. jPath) != 0) {
  1924. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1925. "FileSystem::listStatus");
  1926. destroyLocalReference(env, jPath);
  1927. return NULL;
  1928. }
  1929. jPathList = jVal.l;
  1930. //Figure out no of entries in that directory
  1931. jsize jPathListSize = (*env)->GetArrayLength(env, jPathList);
  1932. *numEntries = jPathListSize;
  1933. if (jPathListSize == 0) {
  1934. errno = 0;
  1935. goto done;
  1936. }
  1937. //Allocate memory
  1938. pathList = calloc(jPathListSize, sizeof(hdfsFileInfo));
  1939. if (pathList == NULL) {
  1940. errno = ENOMEM;
  1941. goto done;
  1942. }
  1943. //Save path information in pathList
  1944. jsize i;
  1945. jobject tmpStat;
  1946. for (i=0; i < jPathListSize; ++i) {
  1947. tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
  1948. if (getFileInfoFromStat(env, tmpStat, &pathList[i])) {
  1949. hdfsFreeFileInfo(pathList, jPathListSize);
  1950. destroyLocalReference(env, tmpStat);
  1951. pathList = NULL;
  1952. goto done;
  1953. }
  1954. destroyLocalReference(env, tmpStat);
  1955. }
  1956. done:
  1957. //Delete unnecessary local references
  1958. destroyLocalReference(env, jPath);
  1959. destroyLocalReference(env, jPathList);
  1960. return pathList;
  1961. }
  1962. hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
  1963. {
  1964. // JAVA EQUIVALENT:
  1965. // File f(path);
  1966. // fs.isDirectory(f)
  1967. // fs.lastModified() ??
  1968. // fs.getLength(f)
  1969. // f.getPath()
  1970. //Get the JNIEnv* corresponding to current thread
  1971. JNIEnv* env = getJNIEnv();
  1972. if (env == NULL) {
  1973. errno = EINTERNAL;
  1974. return NULL;
  1975. }
  1976. jobject jFS = (jobject)fs;
  1977. //Create an object of org.apache.hadoop.fs.Path
  1978. jobject jPath = constructNewObjectOfPath(env, path);
  1979. if (jPath == NULL) {
  1980. return NULL;
  1981. }
  1982. hdfsFileInfo *fileInfo = calloc(1, sizeof(hdfsFileInfo));
  1983. if (getFileInfo(env, jFS, jPath, fileInfo)) {
  1984. hdfsFreeFileInfo(fileInfo, 1);
  1985. fileInfo = NULL;
  1986. goto done;
  1987. }
  1988. done:
  1989. //Delete unnecessary local references
  1990. destroyLocalReference(env, jPath);
  1991. return fileInfo;
  1992. }
  1993. void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
  1994. {
  1995. //Free the mName, mOwner, and mGroup
  1996. int i;
  1997. for (i=0; i < numEntries; ++i) {
  1998. if (hdfsFileInfo[i].mName) {
  1999. free(hdfsFileInfo[i].mName);
  2000. }
  2001. if (hdfsFileInfo[i].mOwner) {
  2002. free(hdfsFileInfo[i].mOwner);
  2003. }
  2004. if (hdfsFileInfo[i].mGroup) {
  2005. free(hdfsFileInfo[i].mGroup);
  2006. }
  2007. }
  2008. //Free entire block
  2009. free(hdfsFileInfo);
  2010. }
  2011. /**
  2012. * vim: ts=4: sw=4: et:
  2013. */