hdfs.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066
  1. /**
  2. * Licensed to the Apache Software Foundation (ASF) under one
  3. * or more contributor license agreements. See the NOTICE file
  4. * distributed with this work for additional information
  5. * regarding copyright ownership. The ASF licenses this file
  6. * to you under the Apache License, Version 2.0 (the
  7. * "License"); you may not use this file except in compliance
  8. * with the License. You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. #include "hdfs.h"
  19. #include "hdfsJniHelper.h"
  20. /* Some frequently used Java paths */
  21. #define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
  22. #define HADOOP_PATH "org/apache/hadoop/fs/Path"
  23. #define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
  24. #define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
  25. #define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
  26. #define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
  27. #define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
  28. #define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
  29. #define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
  30. #define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
  31. #define HADOOP_UNIX_USER_GROUP_INFO "org/apache/hadoop/security/UnixUserGroupInformation"
  32. #define HADOOP_USER_GROUP_INFO "org/apache/hadoop/security/UserGroupInformation"
  33. #define JAVA_NET_ISA "java/net/InetSocketAddress"
  34. #define JAVA_NET_URI "java/net/URI"
  35. #define JAVA_STRING "java/lang/String"
  36. #define JAVA_VOID "V"
  37. /* Macros for constructing method signatures */
  38. #define JPARAM(X) "L" X ";"
  39. #define JARRPARAM(X) "[L" X ";"
  40. #define JMETHOD1(X, R) "(" X ")" R
  41. #define JMETHOD2(X, Y, R) "(" X Y ")" R
  42. #define JMETHOD3(X, Y, Z, R) "(" X Y Z")" R
/**
 * hdfsJniEnv: A wrapper struct to be used as 'value'
 * while saving thread -> JNIEnv* mappings
 */
typedef struct
{
    JNIEnv* env;  // JNIEnv* attached to one particular thread
} hdfsJniEnv;
  51. /**
  52. * Helper function to destroy a local reference of java.lang.Object
  53. * @param env: The JNIEnv pointer.
  54. * @param jFile: The local reference of java.lang.Object object
  55. * @return None.
  56. */
  57. static void destroyLocalReference(JNIEnv *env, jobject jObject)
  58. {
  59. if (jObject)
  60. (*env)->DeleteLocalRef(env, jObject);
  61. }
  62. /**
  63. * Helper function to create a org.apache.hadoop.fs.Path object.
  64. * @param env: The JNIEnv pointer.
  65. * @param path: The file-path for which to construct org.apache.hadoop.fs.Path
  66. * object.
  67. * @return Returns a jobject on success and NULL on error.
  68. */
  69. static jobject constructNewObjectOfPath(JNIEnv *env, const char *path)
  70. {
  71. //Construct a java.lang.String object
  72. jstring jPathString = (*env)->NewStringUTF(env, path);
  73. //Construct the org.apache.hadoop.fs.Path object
  74. jobject jPath =
  75. constructNewObjectOfClass(env, NULL, "org/apache/hadoop/fs/Path",
  76. "(Ljava/lang/String;)V", jPathString);
  77. if (jPath == NULL) {
  78. fprintf(stderr, "Can't construct instance of class "
  79. "org.apache.hadoop.fs.Path for %s\n", path);
  80. errno = EINTERNAL;
  81. return NULL;
  82. }
  83. // Destroy the local reference to the java.lang.String object
  84. destroyLocalReference(env, jPathString);
  85. return jPath;
  86. }
/**
 * Helper function to translate an exception into a meaningful errno value.
 * @param exc: The exception.
 * @param env: The JNIEnv Pointer.
 * @param method: The name of the method that threw the exception. This
 * may be format string to be used in conjuction with additional arguments.
 * @return Returns a meaningful errno value if possible, or EINTERNAL if not.
 *
 * Side effects: prints a diagnostic to stderr, describes and clears any
 * pending JVM exception, and frees the class-name string obtained from
 * classNameOfObject().
 */
static int errnoFromException(jthrowable exc, JNIEnv *env,
                              const char *method, ...)
{
    va_list ap;
    int errnum = 0;
    char *excClass = NULL;

    // No exception object at all: fall through to the generic report.
    if (exc == NULL)
        goto default_error;

    // classNameOfObject() returns a malloc'd dotted class name; we own it.
    if ((excClass = classNameOfObject((jobject) exc, env)) == NULL) {
        errnum = EINTERNAL;
        goto done;
    }

    // Map the one exception we understand to a POSIX errno.
    if (!strcmp(excClass, "org.apache.hadoop.fs.permission."
                "AccessControlException")) {
        errnum = EACCES;
        goto done;
    }

    //TODO: interpret more exceptions; maybe examine exc.getMessage()

default_error:
    //Can't tell what went wrong, so just punt
    // ExceptionDescribe is a no-op when no exception is pending, so this is
    // safe on the exc == NULL path as well.
    (*env)->ExceptionDescribe(env);
    fprintf(stderr, "Call to ");
    va_start(ap, method);
    vfprintf(stderr, method, ap);
    va_end(ap);
    fprintf(stderr, " failed!\n");
    errnum = EINTERNAL;

done:
    // Always clear the pending exception so subsequent JNI calls are legal.
    (*env)->ExceptionClear(env);
    if (excClass != NULL)
        free(excClass);
    return errnum;
}
/**
 * Connect to an hdfs file system as the current user.
 * Thin wrapper: delegates to hdfsConnectAsUser with no user/group info.
 * @param host: NULL selects the local filesystem; "default" with port 0
 * selects the configured default filesystem; otherwise a namenode host.
 * @param port: The namenode port.
 * @return A filesystem handle on success, NULL on error (errno set).
 */
hdfsFS hdfsConnect(const char* host, tPort port) {
    // connect with NULL as user name/groups
    return hdfsConnectAsUser(host, port, NULL, NULL, 0);
}
/**
 * Connect to an hdfs file system, optionally impersonating a specific
 * user/group set (written into the Configuration before FileSystem.get).
 * @param host: NULL -> FileSystem.getLocal(conf); "default" with port 0 ->
 * FileSystem.get(conf); otherwise builds a "hdfs://host:port" URI.
 * @param port: The namenode port (ignored for local/default cases above).
 * @param user: Optional user name; if non-NULL, groups must be non-empty.
 * @param groups: Group names for the user.
 * @param groups_size: Number of entries in groups.
 * @return A GLOBAL reference to the FileSystem object (callers release it
 * via hdfsDisconnect), or NULL on error with errno set.
 */
hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user , const char **groups, int groups_size )
{
    // JAVA EQUIVALENT:
    //  FileSystem fs = FileSystem.get(new Configuration());
    //  return fs;

    JNIEnv *env = 0;
    jobject jConfiguration = NULL;
    jobject jFS = NULL;
    jobject jURI = NULL;
    jstring jURIString = NULL;
    jvalue jVal;
    jthrowable jExc = NULL;
    char *cURI = 0;
    jobject gFsRef = NULL;

    //Get the JNIEnv* corresponding to current thread
    env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    //Create the org.apache.hadoop.conf.Configuration object
    jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return NULL;
    }

    if (user != NULL) {
        // Impersonation requires at least one group.
        if (groups == NULL || groups_size <= 0) {
            fprintf(stderr, "ERROR: groups must not be empty/null\n");
            errno = EINVAL;
            // NOTE(review): jConfiguration local ref leaks on this path —
            // consider destroying it before returning; confirm against the
            // other error branches below, which do release it.
            return NULL;
        }
        jstring jUserString = (*env)->NewStringUTF(env, user);
        jarray jGroups = constructNewArrayString(env, &jExc, groups, groups_size);
        if (jGroups == NULL) {
            errno = EINTERNAL;
            fprintf(stderr, "ERROR: could not construct groups array\n");
            return NULL;
        }

        // Build a UnixUserGroupInformation(user, groups) object.
        jobject jUgi;
        if ((jUgi = constructNewObjectOfClass(env, &jExc, HADOOP_UNIX_USER_GROUP_INFO, JMETHOD2(JPARAM(JAVA_STRING), JARRPARAM(JAVA_STRING), JAVA_VOID), jUserString, jGroups)) == NULL) {
            fprintf(stderr,"failed to construct hadoop user unix group info object\n");
            errno = errnoFromException(jExc, env, HADOOP_UNIX_USER_GROUP_INFO,
                                       "init");
            destroyLocalReference(env, jConfiguration);
            destroyLocalReference(env, jUserString);
            if (jGroups != NULL) {
                destroyLocalReference(env, jGroups);
            }
            return NULL;
        }
#define USE_UUGI
#ifdef USE_UUGI
        // Save the UGI into the Configuration under
        // UnixUserGroupInformation.UGI_PROPERTY_NAME ("hadoop.job.ugi") so
        // that FileSystem.get() below picks up the impersonated identity.
        jstring jAttrString = (*env)->NewStringUTF(env,"hadoop.job.ugi");
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_UNIX_USER_GROUP_INFO, "saveToConf",
                         JMETHOD3(JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING), JPARAM(HADOOP_UNIX_USER_GROUP_INFO), JAVA_VOID),
                         jConfiguration, jAttrString, jUgi) != 0) {
            // NOTE(review): HADOOP_FSPERM here looks like a copy/paste slip —
            // the failing class is UnixUserGroupInformation; confirm and fix
            // the message source.
            errno = errnoFromException(jExc, env, HADOOP_FSPERM,
                                       "init");
            destroyLocalReference(env, jConfiguration);
            destroyLocalReference(env, jUserString);
            if (jGroups != NULL) {
                destroyLocalReference(env, jGroups);
            }
            destroyLocalReference(env, jUgi);
            return NULL;
        }
        // NOTE(review): jAttrString local ref is never released — confirm.
        destroyLocalReference(env, jUserString);
        destroyLocalReference(env, jGroups);
        destroyLocalReference(env, jUgi);
    }
#else
    // what does "current" mean in the context of libhdfs ? does it mean for the last hdfs connection we used?
    // that's why this code cannot be activated. We know the above use of the conf object should work well with
    // multiple connections.
    if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_USER_GROUP_INFO, "setCurrentUGI",
                     JMETHOD1(JPARAM(HADOOP_USER_GROUP_INFO), JAVA_VOID),
                     jUgi) != 0) {
        errno = errnoFromException(jExc, env, HADOOP_USER_GROUP_INFO,
                                   "setCurrentUGI");
        destroyLocalReference(env, jConfiguration);
        destroyLocalReference(env, jUserString);
        if (jGroups != NULL) {
            destroyLocalReference(env, jGroups);
        }
        destroyLocalReference(env, jUgi);
        return NULL;
    }
    destroyLocalReference(env, jUserString);
    destroyLocalReference(env, jGroups);
    destroyLocalReference(env, jUgi);
    }
#endif
    //Check what type of FileSystem the caller wants...
    if (host == NULL) {
        // fs = FileSytem::getLocal(conf);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "getLocal",
                         JMETHOD1(JPARAM(HADOOP_CONF),
                                  JPARAM(HADOOP_LOCALFS)),
                         jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::getLocal");
            goto done;
        }
        jFS = jVal.l;
    }
    else if (!strcmp(host, "default") && port == 0) {
        //fs = FileSystem::get(conf);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                         HADOOP_FS, "get",
                         JMETHOD1(JPARAM(HADOOP_CONF),
                                  JPARAM(HADOOP_FS)),
                         jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::get");
            goto done;
        }
        jFS = jVal.l;
    }
    else {
        // fs = FileSystem::get(URI, conf);
        // NOTE(review): malloc result is unchecked — sprintf below would
        // crash on OOM; consider checking before use.
        cURI = malloc(strlen(host)+16);
        sprintf(cURI, "hdfs://%s:%d", host, (int)(port));
        jURIString = (*env)->NewStringUTF(env, cURI);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
                         "create", "(Ljava/lang/String;)Ljava/net/URI;",
                         jURIString) != 0) {
            errno = errnoFromException(jExc, env, "java.net.URI::create");
            goto done;
        }
        jURI = jVal.l;
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
                         JMETHOD2(JPARAM(JAVA_NET_URI),
                                  JPARAM(HADOOP_CONF), JPARAM(HADOOP_FS)),
                         jURI, jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "Filesystem::get(URI, Configuration)");
            goto done;
        }
        jFS = jVal.l;
    }

done:
    // Release unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jURIString);
    destroyLocalReference(env, jURI);
    if (cURI) free(cURI);

    /* Create a global reference for this fs */
    if (jFS) {
        gFsRef = (*env)->NewGlobalRef(env, jFS);
        destroyLocalReference(env, jFS);
    }
    return gFsRef;
}
  290. int hdfsDisconnect(hdfsFS fs)
  291. {
  292. // JAVA EQUIVALENT:
  293. // fs.close()
  294. //Get the JNIEnv* corresponding to current thread
  295. JNIEnv* env = getJNIEnv();
  296. if (env == NULL) {
  297. errno = EINTERNAL;
  298. return -2;
  299. }
  300. //Parameters
  301. jobject jFS = (jobject)fs;
  302. //Caught exception
  303. jthrowable jExc = NULL;
  304. //Sanity check
  305. if (fs == NULL) {
  306. errno = EBADF;
  307. return -1;
  308. }
  309. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  310. "close", "()V") != 0) {
  311. errno = errnoFromException(jExc, env, "Filesystem::close");
  312. return -1;
  313. }
  314. //Release unnecessary references
  315. (*env)->DeleteGlobalRef(env, fs);
  316. return 0;
  317. }
  318. hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
  319. int bufferSize, short replication, tSize blockSize)
  320. {
  321. /*
  322. JAVA EQUIVALENT:
  323. File f = new File(path);
  324. FSData{Input|Output}Stream f{is|os} = fs.create(f);
  325. return f{is|os};
  326. */
  327. /* Get the JNIEnv* corresponding to current thread */
  328. JNIEnv* env = getJNIEnv();
  329. if (env == NULL) {
  330. errno = EINTERNAL;
  331. return NULL;
  332. }
  333. jobject jFS = (jobject)fs;
  334. if (flags & O_RDWR) {
  335. fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
  336. errno = ENOTSUP;
  337. return NULL;
  338. }
  339. /* The hadoop java api/signature */
  340. const char* method = ((flags & O_WRONLY) == 0) ? "open" : "create";
  341. const char* signature = ((flags & O_WRONLY) == 0) ?
  342. JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
  343. JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
  344. /* Return value */
  345. hdfsFile file = NULL;
  346. /* Create an object of org.apache.hadoop.fs.Path */
  347. jobject jPath = constructNewObjectOfPath(env, path);
  348. if (jPath == NULL) {
  349. return NULL;
  350. }
  351. /* Get the Configuration object from the FileSystem object */
  352. jvalue jVal;
  353. jobject jConfiguration = NULL;
  354. jthrowable jExc = NULL;
  355. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  356. "getConf", JMETHOD1("", JPARAM(HADOOP_CONF))) != 0) {
  357. errno = errnoFromException(jExc, env, "get configuration object "
  358. "from filesystem");
  359. destroyLocalReference(env, jPath);
  360. return NULL;
  361. }
  362. jConfiguration = jVal.l;
  363. jint jBufferSize = bufferSize;
  364. jshort jReplication = replication;
  365. jlong jBlockSize = blockSize;
  366. jstring jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
  367. jstring jStrReplication = (*env)->NewStringUTF(env, "dfs.replication");
  368. jstring jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");
  369. //bufferSize
  370. if (!bufferSize) {
  371. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
  372. HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
  373. jStrBufferSize, 4096) != 0) {
  374. errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
  375. "Configuration::getInt");
  376. goto done;
  377. }
  378. jBufferSize = jVal.i;
  379. }
  380. if (flags & O_WRONLY) {
  381. //replication
  382. if (!replication) {
  383. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
  384. HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
  385. jStrReplication, 1) != 0) {
  386. errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
  387. "Configuration::getInt");
  388. goto done;
  389. }
  390. jReplication = jVal.i;
  391. }
  392. //blockSize
  393. if (!blockSize) {
  394. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
  395. HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
  396. jStrBlockSize, 67108864)) {
  397. errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
  398. "FileSystem::%s(%s)", method,
  399. signature);
  400. goto done;
  401. }
  402. jBlockSize = jVal.j;
  403. }
  404. }
  405. /* Create and return either the FSDataInputStream or
  406. FSDataOutputStream references jobject jStream */
  407. if ((flags & O_WRONLY) == 0) {
  408. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  409. method, signature, jPath, jBufferSize)) {
  410. errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
  411. "FileSystem::%s(%s)", method,
  412. signature);
  413. goto done;
  414. }
  415. }
  416. else {
  417. jboolean jOverWrite = 1;
  418. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  419. method, signature, jPath, jOverWrite,
  420. jBufferSize, jReplication, jBlockSize)) {
  421. errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
  422. "FileSystem::%s(%s)", method,
  423. signature);
  424. goto done;
  425. }
  426. }
  427. file = malloc(sizeof(struct hdfsFile_internal));
  428. if (!file) {
  429. errno = ENOMEM;
  430. return NULL;
  431. }
  432. file->file = (*env)->NewGlobalRef(env, jVal.l);
  433. file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
  434. destroyLocalReference(env, jVal.l);
  435. done:
  436. //Delete unnecessary local references
  437. destroyLocalReference(env, jStrBufferSize);
  438. destroyLocalReference(env, jStrReplication);
  439. destroyLocalReference(env, jStrBlockSize);
  440. destroyLocalReference(env, jConfiguration);
  441. destroyLocalReference(env, jPath);
  442. return file;
  443. }
  444. int hdfsCloseFile(hdfsFS fs, hdfsFile file)
  445. {
  446. // JAVA EQUIVALENT:
  447. // file.close
  448. //Get the JNIEnv* corresponding to current thread
  449. JNIEnv* env = getJNIEnv();
  450. if (env == NULL) {
  451. errno = EINTERNAL;
  452. return -2;
  453. }
  454. //Parameters
  455. jobject jStream = (jobject)(file ? file->file : NULL);
  456. //Caught exception
  457. jthrowable jExc = NULL;
  458. //Sanity check
  459. if (!file || file->type == UNINITIALIZED) {
  460. errno = EBADF;
  461. return -1;
  462. }
  463. //The interface whose 'close' method to be called
  464. const char* interface = (file->type == INPUT) ?
  465. HADOOP_ISTRM : HADOOP_OSTRM;
  466. if (invokeMethod(env, NULL, &jExc, INSTANCE, jStream, interface,
  467. "close", "()V") != 0) {
  468. errno = errnoFromException(jExc, env, "%s::close", interface);
  469. return -1;
  470. }
  471. //De-allocate memory
  472. free(file);
  473. (*env)->DeleteGlobalRef(env, jStream);
  474. return 0;
  475. }
  476. int hdfsExists(hdfsFS fs, const char *path)
  477. {
  478. JNIEnv *env = getJNIEnv();
  479. if (env == NULL) {
  480. errno = EINTERNAL;
  481. return -2;
  482. }
  483. jobject jPath = constructNewObjectOfPath(env, path);
  484. jvalue jVal;
  485. jthrowable jExc = NULL;
  486. jobject jFS = (jobject)fs;
  487. if (jPath == NULL) {
  488. return -1;
  489. }
  490. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  491. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  492. jPath) != 0) {
  493. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  494. "FileSystem::exists");
  495. return -1;
  496. }
  497. return jVal.z ? 0 : -1;
  498. }
  499. tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
  500. {
  501. // JAVA EQUIVALENT:
  502. // byte [] bR = new byte[length];
  503. // fis.read(bR);
  504. //Get the JNIEnv* corresponding to current thread
  505. JNIEnv* env = getJNIEnv();
  506. if (env == NULL) {
  507. errno = EINTERNAL;
  508. return -1;
  509. }
  510. //Parameters
  511. jobject jInputStream = (jobject)(f ? f->file : NULL);
  512. jbyteArray jbRarray;
  513. jint noReadBytes = 0;
  514. jvalue jVal;
  515. jthrowable jExc = NULL;
  516. //Sanity check
  517. if (!f || f->type == UNINITIALIZED) {
  518. errno = EBADF;
  519. return -1;
  520. }
  521. //Error checking... make sure that this file is 'readable'
  522. if (f->type != INPUT) {
  523. fprintf(stderr, "Cannot read from a non-InputStream object!\n");
  524. errno = EINVAL;
  525. return -1;
  526. }
  527. //Read the requisite bytes
  528. jbRarray = (*env)->NewByteArray(env, length);
  529. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  530. "read", "([B)I", jbRarray) != 0) {
  531. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  532. "FSDataInputStream::read");
  533. noReadBytes = -1;
  534. }
  535. else {
  536. noReadBytes = jVal.i;
  537. if (noReadBytes > 0) {
  538. (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
  539. } else {
  540. //This is a valid case: there aren't any bytes left to read!
  541. if (noReadBytes == 0 || noReadBytes < -1) {
  542. fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes);
  543. }
  544. noReadBytes = 0;
  545. }
  546. errno = 0;
  547. }
  548. destroyLocalReference(env, jbRarray);
  549. return noReadBytes;
  550. }
/**
 * Positioned read: read up to 'length' bytes starting at 'position'
 * without moving the stream's current offset.
 * @param fs: The filesystem handle (unused here).
 * @param f: The file handle; must be an initialized INPUT stream.
 * @param position: Absolute file offset to read from.
 * @param buffer: Destination buffer (at least 'length' bytes).
 * @param length: Maximum number of bytes to read.
 * @return Number of bytes read, 0 at EOF, -1 on error (errno set).
 */
tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
                void* buffer, tSize length)
{
    // JAVA EQUIVALENT:
    //  byte [] bR = new byte[length];
    //  fis.read(pos, bR, 0, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : NULL);
    jbyteArray jbRarray;
    jint noReadBytes = 0;
    jvalue jVal;
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //Error checking... make sure that this file is 'readable'
    if (f->type != INPUT) {
        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    //Read the requisite bytes
    // NOTE(review): NewByteArray result is not checked for NULL — confirm
    // whether a JVM OOM here needs explicit handling.
    jbRarray = (*env)->NewByteArray(env, length);
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
                     "read", "(J[BII)I", position, jbRarray, 0, length) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::read");
        noReadBytes = -1;
    }
    else {
        noReadBytes = jVal.i;
        if (noReadBytes > 0) {
            // Copy the bytes read on the Java side into the caller's buffer.
            (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
        } else {
            //This is a valid case: there aren't any bytes left to read!
            if (noReadBytes == 0 || noReadBytes < -1) {
                fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes);
            }
            noReadBytes = 0;
        }
        errno = 0;
    }
    destroyLocalReference(env, jbRarray);
    return noReadBytes;
}
/**
 * Write 'length' bytes from 'buffer' to an open output stream.
 * @param fs: The filesystem handle (unused here).
 * @param f: The file handle; must be an initialized OUTPUT stream.
 * @param buffer: Source bytes to write.
 * @param length: Number of bytes to write; 0 is a valid no-op.
 * @return 'length' on success (libc-style all-or-nothing), -1 on error
 * (errno set).
 */
tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
{
    // JAVA EQUIVALENT
    // byte b[] = str.getBytes();
    // fso.write(b);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jOutputStream = (jobject)(f ? f->file : 0);
    jbyteArray jbWarray;

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    if (length < 0) {
        errno = EINVAL;
        return -1;
    }

    //Error checking... make sure that this file is 'writable'
    if (f->type != OUTPUT) {
        fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    // 'length' equals 'zero' is a valid use-case according to Posix!
    if (length != 0) {
        //Write the requisite bytes into the file
        // NOTE(review): NewByteArray result is not checked for NULL before
        // SetByteArrayRegion — confirm whether JVM OOM needs handling here.
        jbWarray = (*env)->NewByteArray(env, length);
        (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
        if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
                         HADOOP_OSTRM, "write",
                         "([B)V", jbWarray) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FSDataOutputStream::write");
            length = -1;
        }
        destroyLocalReference(env, jbWarray);
    }

    //Return no. of bytes succesfully written (libc way)
    //i.e. 'length' itself! ;-)
    return length;
}
  653. int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
  654. {
  655. // JAVA EQUIVALENT
  656. // fis.seek(pos);
  657. //Get the JNIEnv* corresponding to current thread
  658. JNIEnv* env = getJNIEnv();
  659. if (env == NULL) {
  660. errno = EINTERNAL;
  661. return -1;
  662. }
  663. //Parameters
  664. jobject jInputStream = (jobject)(f ? f->file : 0);
  665. //Caught exception
  666. jthrowable jExc = NULL;
  667. //Sanity check
  668. if (!f || f->type != INPUT) {
  669. errno = EBADF;
  670. return -1;
  671. }
  672. if (invokeMethod(env, NULL, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  673. "seek", "(J)V", desiredPos) != 0) {
  674. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  675. "FSDataInputStream::seek");
  676. return -1;
  677. }
  678. return 0;
  679. }
  680. tOffset hdfsTell(hdfsFS fs, hdfsFile f)
  681. {
  682. // JAVA EQUIVALENT
  683. // pos = f.getPos();
  684. //Get the JNIEnv* corresponding to current thread
  685. JNIEnv* env = getJNIEnv();
  686. if (env == NULL) {
  687. errno = EINTERNAL;
  688. return -1;
  689. }
  690. //Parameters
  691. jobject jStream = (jobject)(f ? f->file : 0);
  692. //Sanity check
  693. if (!f || f->type == UNINITIALIZED) {
  694. errno = EBADF;
  695. return -1;
  696. }
  697. const char* interface = (f->type == INPUT) ?
  698. HADOOP_ISTRM : HADOOP_OSTRM;
  699. jlong currentPos = -1;
  700. jvalue jVal;
  701. jthrowable jExc = NULL;
  702. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStream,
  703. interface, "getPos", "()J") != 0) {
  704. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  705. "FSDataInputStream::getPos");
  706. return -1;
  707. }
  708. currentPos = jVal.j;
  709. return (tOffset)currentPos;
  710. }
  711. int hdfsFlush(hdfsFS fs, hdfsFile f)
  712. {
  713. // JAVA EQUIVALENT
  714. // fos.flush();
  715. //Get the JNIEnv* corresponding to current thread
  716. JNIEnv* env = getJNIEnv();
  717. if (env == NULL) {
  718. errno = EINTERNAL;
  719. return -1;
  720. }
  721. //Parameters
  722. jobject jOutputStream = (jobject)(f ? f->file : 0);
  723. //Caught exception
  724. jthrowable jExc = NULL;
  725. //Sanity check
  726. if (!f || f->type != OUTPUT) {
  727. errno = EBADF;
  728. return -1;
  729. }
  730. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  731. HADOOP_OSTRM, "flush", "()V") != 0) {
  732. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  733. "FSDataInputStream::flush");
  734. return -1;
  735. }
  736. return 0;
  737. }
  738. int hdfsAvailable(hdfsFS fs, hdfsFile f)
  739. {
  740. // JAVA EQUIVALENT
  741. // fis.available();
  742. //Get the JNIEnv* corresponding to current thread
  743. JNIEnv* env = getJNIEnv();
  744. if (env == NULL) {
  745. errno = EINTERNAL;
  746. return -1;
  747. }
  748. //Parameters
  749. jobject jInputStream = (jobject)(f ? f->file : 0);
  750. //Caught exception
  751. jthrowable jExc = NULL;
  752. //Sanity check
  753. if (!f || f->type != INPUT) {
  754. errno = EBADF;
  755. return -1;
  756. }
  757. jint available = -1;
  758. jvalue jVal;
  759. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
  760. HADOOP_ISTRM, "available", "()I") != 0) {
  761. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  762. "FSDataInputStream::available");
  763. return -1;
  764. }
  765. available = jVal.i;
  766. return available;
  767. }
  768. int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
  769. {
  770. //JAVA EQUIVALENT
  771. // FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
  772. // deleteSource = false, conf)
  773. //Get the JNIEnv* corresponding to current thread
  774. JNIEnv* env = getJNIEnv();
  775. if (env == NULL) {
  776. errno = EINTERNAL;
  777. return -1;
  778. }
  779. //Parameters
  780. jobject jSrcFS = (jobject)srcFS;
  781. jobject jDstFS = (jobject)dstFS;
  782. jobject jSrcPath = NULL;
  783. jobject jDstPath = NULL;
  784. jSrcPath = constructNewObjectOfPath(env, src);
  785. if (jSrcPath == NULL) {
  786. return -1;
  787. }
  788. jDstPath = constructNewObjectOfPath(env, dst);
  789. if (jDstPath == NULL) {
  790. destroyLocalReference(env, jSrcPath);
  791. return -1;
  792. }
  793. int retval = 0;
  794. //Create the org.apache.hadoop.conf.Configuration object
  795. jobject jConfiguration =
  796. constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  797. if (jConfiguration == NULL) {
  798. fprintf(stderr, "Can't construct instance of class "
  799. "org.apache.hadoop.conf.Configuration\n");
  800. errno = EINTERNAL;
  801. destroyLocalReference(env, jSrcPath);
  802. destroyLocalReference(env, jDstPath);
  803. return -1;
  804. }
  805. //FileUtil::copy
  806. jboolean deleteSource = 0; //Only copy
  807. jvalue jVal;
  808. jthrowable jExc = NULL;
  809. if (invokeMethod(env, &jVal, &jExc, STATIC,
  810. NULL, "org/apache/hadoop/fs/FileUtil", "copy",
  811. "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
  812. jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
  813. jConfiguration) != 0) {
  814. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  815. "FileUtil::copy");
  816. retval = -1;
  817. goto done;
  818. }
  819. done:
  820. //Delete unnecessary local references
  821. destroyLocalReference(env, jConfiguration);
  822. destroyLocalReference(env, jSrcPath);
  823. destroyLocalReference(env, jDstPath);
  824. return retval;
  825. }
  826. int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
  827. {
  828. //JAVA EQUIVALENT
  829. // FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
  830. // deleteSource = true, conf)
  831. //Get the JNIEnv* corresponding to current thread
  832. JNIEnv* env = getJNIEnv();
  833. if (env == NULL) {
  834. errno = EINTERNAL;
  835. return -1;
  836. }
  837. //Parameters
  838. jobject jSrcFS = (jobject)srcFS;
  839. jobject jDstFS = (jobject)dstFS;
  840. jobject jSrcPath = NULL;
  841. jobject jDstPath = NULL;
  842. jSrcPath = constructNewObjectOfPath(env, src);
  843. if (jSrcPath == NULL) {
  844. return -1;
  845. }
  846. jDstPath = constructNewObjectOfPath(env, dst);
  847. if (jDstPath == NULL) {
  848. destroyLocalReference(env, jSrcPath);
  849. return -1;
  850. }
  851. int retval = 0;
  852. //Create the org.apache.hadoop.conf.Configuration object
  853. jobject jConfiguration =
  854. constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  855. if (jConfiguration == NULL) {
  856. fprintf(stderr, "Can't construct instance of class "
  857. "org.apache.hadoop.conf.Configuration\n");
  858. errno = EINTERNAL;
  859. destroyLocalReference(env, jSrcPath);
  860. destroyLocalReference(env, jDstPath);
  861. return -1;
  862. }
  863. //FileUtil::copy
  864. jboolean deleteSource = 1; //Delete src after copy
  865. jvalue jVal;
  866. jthrowable jExc = NULL;
  867. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
  868. "org/apache/hadoop/fs/FileUtil", "copy",
  869. "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
  870. jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
  871. jConfiguration) != 0) {
  872. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  873. "FileUtil::copy(move)");
  874. retval = -1;
  875. goto done;
  876. }
  877. done:
  878. //Delete unnecessary local references
  879. destroyLocalReference(env, jConfiguration);
  880. destroyLocalReference(env, jSrcPath);
  881. destroyLocalReference(env, jDstPath);
  882. return retval;
  883. }
  884. int hdfsDelete(hdfsFS fs, const char* path)
  885. {
  886. // JAVA EQUIVALENT:
  887. // File f = new File(path);
  888. // bool retval = fs.delete(f);
  889. //Get the JNIEnv* corresponding to current thread
  890. JNIEnv* env = getJNIEnv();
  891. if (env == NULL) {
  892. errno = EINTERNAL;
  893. return -1;
  894. }
  895. jobject jFS = (jobject)fs;
  896. //Create an object of java.io.File
  897. jobject jPath = constructNewObjectOfPath(env, path);
  898. if (jPath == NULL) {
  899. return -1;
  900. }
  901. //Delete the file
  902. jvalue jVal;
  903. jthrowable jExc = NULL;
  904. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  905. "delete", "(Lorg/apache/hadoop/fs/Path;)Z",
  906. jPath) != 0) {
  907. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  908. "FileSystem::delete");
  909. return -1;
  910. }
  911. //Delete unnecessary local references
  912. destroyLocalReference(env, jPath);
  913. return (jVal.z) ? 0 : -1;
  914. }
  915. int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
  916. {
  917. // JAVA EQUIVALENT:
  918. // Path old = new Path(oldPath);
  919. // Path new = new Path(newPath);
  920. // fs.rename(old, new);
  921. //Get the JNIEnv* corresponding to current thread
  922. JNIEnv* env = getJNIEnv();
  923. if (env == NULL) {
  924. errno = EINTERNAL;
  925. return -1;
  926. }
  927. jobject jFS = (jobject)fs;
  928. //Create objects of org.apache.hadoop.fs.Path
  929. jobject jOldPath = NULL;
  930. jobject jNewPath = NULL;
  931. jOldPath = constructNewObjectOfPath(env, oldPath);
  932. if (jOldPath == NULL) {
  933. return -1;
  934. }
  935. jNewPath = constructNewObjectOfPath(env, newPath);
  936. if (jNewPath == NULL) {
  937. destroyLocalReference(env, jOldPath);
  938. return -1;
  939. }
  940. //Rename the file
  941. jvalue jVal;
  942. jthrowable jExc = NULL;
  943. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "rename",
  944. JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
  945. jOldPath, jNewPath) != 0) {
  946. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  947. "FileSystem::rename");
  948. return -1;
  949. }
  950. //Delete unnecessary local references
  951. destroyLocalReference(env, jOldPath);
  952. destroyLocalReference(env, jNewPath);
  953. return (jVal.z) ? 0 : -1;
  954. }
  955. char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
  956. {
  957. // JAVA EQUIVALENT:
  958. // Path p = fs.getWorkingDirectory();
  959. // return p.toString()
  960. //Get the JNIEnv* corresponding to current thread
  961. JNIEnv* env = getJNIEnv();
  962. if (env == NULL) {
  963. errno = EINTERNAL;
  964. return NULL;
  965. }
  966. jobject jFS = (jobject)fs;
  967. jobject jPath = NULL;
  968. jvalue jVal;
  969. jthrowable jExc = NULL;
  970. //FileSystem::getWorkingDirectory()
  971. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
  972. HADOOP_FS, "getWorkingDirectory",
  973. "()Lorg/apache/hadoop/fs/Path;") != 0 ||
  974. jVal.l == NULL) {
  975. errno = errnoFromException(jExc, env, "FileSystem::"
  976. "getWorkingDirectory");
  977. return NULL;
  978. }
  979. jPath = jVal.l;
  980. //Path::toString()
  981. jstring jPathString;
  982. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath,
  983. "org/apache/hadoop/fs/Path", "toString",
  984. "()Ljava/lang/String;") != 0) {
  985. errno = errnoFromException(jExc, env, "Path::toString");
  986. destroyLocalReference(env, jPath);
  987. return NULL;
  988. }
  989. jPathString = jVal.l;
  990. const char *jPathChars = (const char*)
  991. ((*env)->GetStringUTFChars(env, jPathString, NULL));
  992. //Copy to user-provided buffer
  993. strncpy(buffer, jPathChars, bufferSize);
  994. //Delete unnecessary local references
  995. (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars);
  996. destroyLocalReference(env, jPathString);
  997. destroyLocalReference(env, jPath);
  998. return buffer;
  999. }
  1000. int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
  1001. {
  1002. // JAVA EQUIVALENT:
  1003. // fs.setWorkingDirectory(Path(path));
  1004. //Get the JNIEnv* corresponding to current thread
  1005. JNIEnv* env = getJNIEnv();
  1006. if (env == NULL) {
  1007. errno = EINTERNAL;
  1008. return -1;
  1009. }
  1010. jobject jFS = (jobject)fs;
  1011. int retval = 0;
  1012. jthrowable jExc = NULL;
  1013. //Create an object of org.apache.hadoop.fs.Path
  1014. jobject jPath = constructNewObjectOfPath(env, path);
  1015. if (jPath == NULL) {
  1016. return -1;
  1017. }
  1018. //FileSystem::setWorkingDirectory()
  1019. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1020. "setWorkingDirectory",
  1021. "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
  1022. errno = errnoFromException(jExc, env, "FileSystem::"
  1023. "setWorkingDirectory");
  1024. retval = -1;
  1025. }
  1026. //Delete unnecessary local references
  1027. destroyLocalReference(env, jPath);
  1028. return retval;
  1029. }
  1030. int hdfsCreateDirectory(hdfsFS fs, const char* path)
  1031. {
  1032. // JAVA EQUIVALENT:
  1033. // fs.mkdirs(new Path(path));
  1034. //Get the JNIEnv* corresponding to current thread
  1035. JNIEnv* env = getJNIEnv();
  1036. if (env == NULL) {
  1037. errno = EINTERNAL;
  1038. return -1;
  1039. }
  1040. jobject jFS = (jobject)fs;
  1041. //Create an object of org.apache.hadoop.fs.Path
  1042. jobject jPath = constructNewObjectOfPath(env, path);
  1043. if (jPath == NULL) {
  1044. return -1;
  1045. }
  1046. //Create the directory
  1047. jvalue jVal;
  1048. jVal.z = 0;
  1049. jthrowable jExc = NULL;
  1050. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1051. "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
  1052. jPath) != 0) {
  1053. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1054. "FileSystem::mkdirs");
  1055. goto done;
  1056. }
  1057. done:
  1058. //Delete unnecessary local references
  1059. destroyLocalReference(env, jPath);
  1060. return (jVal.z) ? 0 : -1;
  1061. }
  1062. int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
  1063. {
  1064. // JAVA EQUIVALENT:
  1065. // fs.setReplication(new Path(path), replication);
  1066. //Get the JNIEnv* corresponding to current thread
  1067. JNIEnv* env = getJNIEnv();
  1068. if (env == NULL) {
  1069. errno = EINTERNAL;
  1070. return -1;
  1071. }
  1072. jobject jFS = (jobject)fs;
  1073. //Create an object of org.apache.hadoop.fs.Path
  1074. jobject jPath = constructNewObjectOfPath(env, path);
  1075. if (jPath == NULL) {
  1076. return -1;
  1077. }
  1078. //Create the directory
  1079. jvalue jVal;
  1080. jthrowable jExc = NULL;
  1081. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1082. "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
  1083. jPath, replication) != 0) {
  1084. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1085. "FileSystem::setReplication");
  1086. goto done;
  1087. }
  1088. done:
  1089. //Delete unnecessary local references
  1090. destroyLocalReference(env, jPath);
  1091. return (jVal.z) ? 0 : -1;
  1092. }
  1093. int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
  1094. {
  1095. // JAVA EQUIVALENT:
  1096. // fs.setOwner(path, owner, group)
  1097. //Get the JNIEnv* corresponding to current thread
  1098. JNIEnv* env = getJNIEnv();
  1099. if (env == NULL) {
  1100. errno = EINTERNAL;
  1101. return -1;
  1102. }
  1103. if (owner == NULL && group == NULL) {
  1104. fprintf(stderr, "Both owner and group cannot be null in chown");
  1105. errno = EINVAL;
  1106. return -1;
  1107. }
  1108. jobject jFS = (jobject)fs;
  1109. jobject jPath = constructNewObjectOfPath(env, path);
  1110. if (jPath == NULL) {
  1111. return -1;
  1112. }
  1113. jstring jOwnerString = (*env)->NewStringUTF(env, owner);
  1114. jstring jGroupString = (*env)->NewStringUTF(env, group);
  1115. //Create the directory
  1116. int ret = 0;
  1117. jthrowable jExc = NULL;
  1118. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1119. "setOwner", JMETHOD3(JPARAM(HADOOP_PATH), JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
  1120. jPath, jOwnerString, jGroupString) != 0) {
  1121. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1122. "FileSystem::setOwner");
  1123. ret = -1;
  1124. goto done;
  1125. }
  1126. done:
  1127. destroyLocalReference(env, jPath);
  1128. destroyLocalReference(env, jOwnerString);
  1129. destroyLocalReference(env, jGroupString);
  1130. return ret;
  1131. }
  1132. int hdfsChmod(hdfsFS fs, const char* path, short mode)
  1133. {
  1134. // JAVA EQUIVALENT:
  1135. // fs.setPermission(path, FsPermission)
  1136. //Get the JNIEnv* corresponding to current thread
  1137. JNIEnv* env = getJNIEnv();
  1138. if (env == NULL) {
  1139. errno = EINTERNAL;
  1140. return -1;
  1141. }
  1142. jobject jFS = (jobject)fs;
  1143. // construct jPerm = FsPermission.createImmutable(short mode);
  1144. jshort jmode = mode;
  1145. jobject jPermObj =
  1146. constructNewObjectOfClass(env, NULL, HADOOP_FSPERM,"(S)V",jmode);
  1147. if (jPermObj == NULL) {
  1148. return -2;
  1149. }
  1150. //Create an object of org.apache.hadoop.fs.Path
  1151. jobject jPath = constructNewObjectOfPath(env, path);
  1152. if (jPath == NULL) {
  1153. return -3;
  1154. }
  1155. //Create the directory
  1156. int ret = 0;
  1157. jthrowable jExc = NULL;
  1158. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1159. "setPermission", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
  1160. jPath, jPermObj) != 0) {
  1161. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1162. "FileSystem::setPermission");
  1163. ret = -1;
  1164. goto done;
  1165. }
  1166. done:
  1167. destroyLocalReference(env, jPath);
  1168. destroyLocalReference(env, jPermObj);
  1169. return ret;
  1170. }
  1171. int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
  1172. {
  1173. // JAVA EQUIVALENT:
  1174. // fs.setTimes(src, mtime, atime)
  1175. //Get the JNIEnv* corresponding to current thread
  1176. JNIEnv* env = getJNIEnv();
  1177. if (env == NULL) {
  1178. errno = EINTERNAL;
  1179. return -1;
  1180. }
  1181. jobject jFS = (jobject)fs;
  1182. //Create an object of org.apache.hadoop.fs.Path
  1183. jobject jPath = constructNewObjectOfPath(env, path);
  1184. if (jPath == NULL) {
  1185. fprintf(stderr, "could not construct path object\n");
  1186. return -2;
  1187. }
  1188. jlong jmtime = mtime * (jlong)1000;
  1189. jlong jatime = atime * (jlong)1000;
  1190. int ret = 0;
  1191. jthrowable jExc = NULL;
  1192. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1193. "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
  1194. jPath, jmtime, jatime) != 0) {
  1195. fprintf(stderr, "call to setTime failed\n");
  1196. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1197. "FileSystem::setTimes");
  1198. ret = -1;
  1199. goto done;
  1200. }
  1201. done:
  1202. destroyLocalReference(env, jPath);
  1203. return ret;
  1204. }
char***
hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
{
    // JAVA EQUIVALENT:
    //  fs.getFileBlockLoctions(new Path(path), start, length);
    //
    // Returns a malloc'd, NULL-terminated array (one entry per block) of
    // NULL-terminated arrays of strdup'd hostnames for the byte range
    // [start, start+length).  The caller releases the whole structure with
    // hdfsFreeHosts().  Returns NULL on error (errno set).

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }
    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    // First look up the FileStatus for the path ...
    jvalue jFSVal;
    jthrowable jFSExc = NULL;
    if (invokeMethod(env, &jFSVal, &jFSExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileStatus",
                     "(Lorg/apache/hadoop/fs/Path;)"
                     "Lorg/apache/hadoop/fs/FileStatus;",
                     jPath) != 0) {
        errno = errnoFromException(jFSExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileStatus");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jobject jFileStatus = jFSVal.l;

    // ... then ask for the block locations of the requested range.
    //org.apache.hadoop.fs.FileSystem::getFileBlockLocations
    char*** blockHosts = NULL;
    jobjectArray jBlockLocations;;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileBlockLocations",
                     "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
                     "[Lorg/apache/hadoop/fs/BlockLocation;",
                     jFileStatus, start, length) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileBlockLocations");
        destroyLocalReference(env, jPath);
        destroyLocalReference(env, jFileStatus);
        return NULL;
    }
    jBlockLocations = jVal.l;

    //Figure out no of entries in jBlockLocations
    //Allocate memory and add NULL at the end
    jsize jNumFileBlocks = (*env)->GetArrayLength(env, jBlockLocations);
    blockHosts = malloc(sizeof(char**) * (jNumFileBlocks+1));
    if (blockHosts == NULL) {
        errno = ENOMEM;
        goto done;
    }
    blockHosts[jNumFileBlocks] = NULL;
    // Zero blocks: return the one-element (NULL) array with errno cleared.
    if (jNumFileBlocks == 0) {
        errno = 0;
        goto done;
    }

    //Now parse each block to get hostnames
    int i = 0;
    for (i=0; i < jNumFileBlocks; ++i) {
        // NOTE(review): the jFileBlock local reference acquired here is
        // never released, and on the getHosts error path below the
        // partially built blockHosts structure is leaked.
        jobject jFileBlock =
            (*env)->GetObjectArrayElement(env, jBlockLocations, i);
        jvalue jVal;
        jobjectArray jFileBlockHosts;
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
                         "getHosts", "()[Ljava/lang/String;") ||
            jVal.l == NULL) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "BlockLocation::getHosts");
            destroyLocalReference(env, jPath);
            destroyLocalReference(env, jFileStatus);
            destroyLocalReference(env, jBlockLocations);
            return NULL;
        }
        jFileBlockHosts = jVal.l;
        //Figure out no of hosts in jFileBlockHosts
        //Allocate memory and add NULL at the end
        jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
        blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts+1));
        if (blockHosts[i] == NULL) {
            // Unwind the rows allocated so far.
            // NOTE(review): after free(blockHosts) the goto below still
            // returns the freed pointer instead of NULL, and the strdup'd
            // hostnames inside earlier rows are not freed — both look like
            // bugs to fix separately.
            int x = 0;
            for (x=0; x < i; ++x) {
                free(blockHosts[x]);
            }
            free(blockHosts);
            errno = ENOMEM;
            goto done;
        }
        blockHosts[i][jNumBlockHosts] = NULL;

        //Now parse each hostname
        int j = 0;
        const char *hostName;
        for (j=0; j < jNumBlockHosts; ++j) {
            jstring jHost =
                (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
            hostName =
                (const char*)((*env)->GetStringUTFChars(env, jHost, NULL));
            blockHosts[i][j] = strdup(hostName);
            (*env)->ReleaseStringUTFChars(env, jHost, hostName);
            destroyLocalReference(env, jHost);
        }
        destroyLocalReference(env, jFileBlockHosts);
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jFileStatus);
    destroyLocalReference(env, jBlockLocations);
    return blockHosts;
}
  1318. void hdfsFreeHosts(char ***blockHosts)
  1319. {
  1320. int i, j;
  1321. for (i=0; blockHosts[i]; i++) {
  1322. for (j=0; blockHosts[i][j]; j++) {
  1323. free(blockHosts[i][j]);
  1324. }
  1325. free(blockHosts[i]);
  1326. }
  1327. free(blockHosts);
  1328. }
  1329. tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
  1330. {
  1331. // JAVA EQUIVALENT:
  1332. // fs.getDefaultBlockSize();
  1333. //Get the JNIEnv* corresponding to current thread
  1334. JNIEnv* env = getJNIEnv();
  1335. if (env == NULL) {
  1336. errno = EINTERNAL;
  1337. return -1;
  1338. }
  1339. jobject jFS = (jobject)fs;
  1340. //FileSystem::getDefaultBlockSize()
  1341. tOffset blockSize = -1;
  1342. jvalue jVal;
  1343. jthrowable jExc = NULL;
  1344. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1345. "getDefaultBlockSize", "()J") != 0) {
  1346. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1347. "FileSystem::getDefaultBlockSize");
  1348. return -1;
  1349. }
  1350. blockSize = jVal.j;
  1351. return blockSize;
  1352. }
  1353. tOffset hdfsGetCapacity(hdfsFS fs)
  1354. {
  1355. // JAVA EQUIVALENT:
  1356. // fs.getRawCapacity();
  1357. //Get the JNIEnv* corresponding to current thread
  1358. JNIEnv* env = getJNIEnv();
  1359. if (env == NULL) {
  1360. errno = EINTERNAL;
  1361. return -1;
  1362. }
  1363. jobject jFS = (jobject)fs;
  1364. if (!((*env)->IsInstanceOf(env, jFS,
  1365. globalClassReference(HADOOP_DFS, env)))) {
  1366. fprintf(stderr, "hdfsGetCapacity works only on a "
  1367. "DistributedFileSystem!\n");
  1368. return -1;
  1369. }
  1370. //FileSystem::getRawCapacity()
  1371. jvalue jVal;
  1372. jthrowable jExc = NULL;
  1373. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
  1374. "getRawCapacity", "()J") != 0) {
  1375. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1376. "FileSystem::getRawCapacity");
  1377. return -1;
  1378. }
  1379. return jVal.j;
  1380. }
  1381. tOffset hdfsGetUsed(hdfsFS fs)
  1382. {
  1383. // JAVA EQUIVALENT:
  1384. // fs.getRawUsed();
  1385. //Get the JNIEnv* corresponding to current thread
  1386. JNIEnv* env = getJNIEnv();
  1387. if (env == NULL) {
  1388. errno = EINTERNAL;
  1389. return -1;
  1390. }
  1391. jobject jFS = (jobject)fs;
  1392. if (!((*env)->IsInstanceOf(env, jFS,
  1393. globalClassReference(HADOOP_DFS, env)))) {
  1394. fprintf(stderr, "hdfsGetUsed works only on a "
  1395. "DistributedFileSystem!\n");
  1396. return -1;
  1397. }
  1398. //FileSystem::getRawUsed()
  1399. jvalue jVal;
  1400. jthrowable jExc = NULL;
  1401. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
  1402. "getRawUsed", "()J") != 0) {
  1403. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1404. "FileSystem::getRawUsed");
  1405. return -1;
  1406. }
  1407. return jVal.j;
  1408. }
static int
getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
{
    // Populate *fileInfo from an org.apache.hadoop.fs.FileStatus object.
    // Returns 0 on success, -1 on error (errno set).  mName, mOwner and
    // mGroup are strdup'd and owned by the caller.
    // NOTE(review): mSize is only assigned for regular files; callers are
    // expected to hand in zeroed memory (hdfsListDirectory uses calloc) —
    // confirm for any new caller.
    jvalue jVal;
    jthrowable jExc = NULL;

    // FileStatus.isDir() -> mKind
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "isDir", "()Z") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::isDir");
        return -1;
    }
    fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;

    // FileStatus.getReplication() -> mReplication
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getReplication", "()S") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getReplication");
        return -1;
    }
    fileInfo->mReplication = jVal.s;

    // FileStatus.getBlockSize() -> mBlockSize
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getBlockSize", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getBlockSize");
        return -1;
    }
    fileInfo->mBlockSize = jVal.j;

    // FileStatus.getModificationTime() -> mLastMod, divided by 1000
    // (Java times are in milliseconds; tTime is kept in seconds).
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getModificationTime", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getModificationTime");
        return -1;
    }
    fileInfo->mLastMod = (tTime) (jVal.j / 1000);

    // FileStatus.getAccessTime() -> mLastAccess, same conversion.
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getAccessTime", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getAccessTime");
        return -1;
    }
    fileInfo->mLastAccess = (tTime) (jVal.j / 1000);

    // FileStatus.getLen() -> mSize, fetched only for regular files.
    if (fileInfo->mKind == kObjectKindFile) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                         HADOOP_STAT, "getLen", "()J") != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileStatus::getLen");
            return -1;
        }
        fileInfo->mSize = jVal.j;
    }

    // FileStatus.getPath() then Path.toString() -> mName.
    jobject jPath;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getPath", "()Lorg/apache/hadoop/fs/Path;") ||
        jVal.l == NULL) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "Path::getPath");
        return -1;
    }
    jPath = jVal.l;
    jstring jPathName;
    const char *cPathName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, HADOOP_PATH,
                     "toString", "()Ljava/lang/String;")) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "Path::toString");
        destroyLocalReference(env, jPath);
        return -1;
    }
    jPathName = jVal.l;
    cPathName = (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
    fileInfo->mName = strdup(cPathName);
    (*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jPathName);

    // FileStatus.getOwner() -> mOwner.
    // NOTE(review): from here on, error returns leak the strings already
    // strdup'd into *fileInfo — worth a follow-up fix.
    jstring jUserName;
    const char* cUserName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getOwner", "()Ljava/lang/String;")) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getOwner failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jUserName = jVal.l;
    cUserName = (const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
    fileInfo->mOwner = strdup(cUserName);
    (*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
    destroyLocalReference(env, jUserName);

    // FileStatus.getGroup() -> mGroup.
    jstring jGroupName;
    const char* cGroupName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getGroup", "()Ljava/lang/String;")) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getGroup failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jGroupName = jVal.l;
    cGroupName = (const char*) ((*env)->GetStringUTFChars(env, jGroupName, NULL));
    fileInfo->mGroup = strdup(cGroupName);
    (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
    destroyLocalReference(env, jGroupName);

    // FileStatus.getPermission().toShort() -> mPermissions.
    jobject jPermission;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getPermission", "()Lorg/apache/hadoop/fs/permission/FsPermission;") ||
        jVal.l == NULL) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getPermission failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jPermission = jVal.l;
    // NOTE(review): if toShort() fails below, jPermission is leaked.
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPermission, HADOOP_FSPERM,
                     "toShort", "()S") != 0) {
        fprintf(stderr, "Call to org.apache.hadoop.fs.permission."
                "FsPermission::toShort failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    fileInfo->mPermissions = jVal.s;
    destroyLocalReference(env, jPermission);
    return 0;
}
  1531. static int
  1532. getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo *fileInfo)
  1533. {
  1534. // JAVA EQUIVALENT:
  1535. // fs.isDirectory(f)
  1536. // fs.getModificationTime()
  1537. // fs.getAccessTime()
  1538. // fs.getLength(f)
  1539. // f.getPath()
  1540. // f.getOwner()
  1541. // f.getGroup()
  1542. // f.getPermission().toShort()
  1543. jobject jStat;
  1544. jvalue jVal;
  1545. jthrowable jExc = NULL;
  1546. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1547. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  1548. jPath) != 0) {
  1549. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1550. "FileSystem::exists");
  1551. return -1;
  1552. }
  1553. if (jVal.z == 0) {
  1554. errno = ENOENT;
  1555. return -1;
  1556. }
  1557. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1558. "getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)),
  1559. jPath) != 0) {
  1560. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1561. "FileSystem::getFileStatus");
  1562. return -1;
  1563. }
  1564. jStat = jVal.l;
  1565. int ret = getFileInfoFromStat(env, jStat, fileInfo);
  1566. destroyLocalReference(env, jStat);
  1567. return ret;
  1568. }
  1569. hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
  1570. {
  1571. // JAVA EQUIVALENT:
  1572. // Path p(path);
  1573. // Path []pathList = fs.listPaths(p)
  1574. // foreach path in pathList
  1575. // getFileInfo(path)
  1576. //Get the JNIEnv* corresponding to current thread
  1577. JNIEnv* env = getJNIEnv();
  1578. if (env == NULL) {
  1579. errno = EINTERNAL;
  1580. return NULL;
  1581. }
  1582. jobject jFS = (jobject)fs;
  1583. //Create an object of org.apache.hadoop.fs.Path
  1584. jobject jPath = constructNewObjectOfPath(env, path);
  1585. if (jPath == NULL) {
  1586. return NULL;
  1587. }
  1588. hdfsFileInfo *pathList = 0;
  1589. jobjectArray jPathList = NULL;
  1590. jvalue jVal;
  1591. jthrowable jExc = NULL;
  1592. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS, "listStatus",
  1593. JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
  1594. jPath) != 0) {
  1595. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1596. "FileSystem::listStatus");
  1597. destroyLocalReference(env, jPath);
  1598. return NULL;
  1599. }
  1600. jPathList = jVal.l;
  1601. //Figure out no of entries in that directory
  1602. jsize jPathListSize = (*env)->GetArrayLength(env, jPathList);
  1603. *numEntries = jPathListSize;
  1604. if (jPathListSize == 0) {
  1605. errno = 0;
  1606. goto done;
  1607. }
  1608. //Allocate memory
  1609. pathList = calloc(jPathListSize, sizeof(hdfsFileInfo));
  1610. if (pathList == NULL) {
  1611. errno = ENOMEM;
  1612. goto done;
  1613. }
  1614. //Save path information in pathList
  1615. jsize i;
  1616. jobject tmpStat;
  1617. for (i=0; i < jPathListSize; ++i) {
  1618. tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
  1619. if (getFileInfoFromStat(env, tmpStat, &pathList[i])) {
  1620. hdfsFreeFileInfo(pathList, jPathListSize);
  1621. destroyLocalReference(env, tmpStat);
  1622. pathList = NULL;
  1623. goto done;
  1624. }
  1625. destroyLocalReference(env, tmpStat);
  1626. }
  1627. done:
  1628. //Delete unnecessary local references
  1629. destroyLocalReference(env, jPath);
  1630. destroyLocalReference(env, jPathList);
  1631. return pathList;
  1632. }
  1633. hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
  1634. {
  1635. // JAVA EQUIVALENT:
  1636. // File f(path);
  1637. // fs.isDirectory(f)
  1638. // fs.lastModified() ??
  1639. // fs.getLength(f)
  1640. // f.getPath()
  1641. //Get the JNIEnv* corresponding to current thread
  1642. JNIEnv* env = getJNIEnv();
  1643. if (env == NULL) {
  1644. errno = EINTERNAL;
  1645. return NULL;
  1646. }
  1647. jobject jFS = (jobject)fs;
  1648. //Create an object of org.apache.hadoop.fs.Path
  1649. jobject jPath = constructNewObjectOfPath(env, path);
  1650. if (jPath == NULL) {
  1651. return NULL;
  1652. }
  1653. hdfsFileInfo *fileInfo = calloc(1, sizeof(hdfsFileInfo));
  1654. if (getFileInfo(env, jFS, jPath, fileInfo)) {
  1655. hdfsFreeFileInfo(fileInfo, 1);
  1656. fileInfo = NULL;
  1657. goto done;
  1658. }
  1659. done:
  1660. //Delete unnecessary local references
  1661. destroyLocalReference(env, jPath);
  1662. return fileInfo;
  1663. }
  1664. void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
  1665. {
  1666. //Free the mName
  1667. int i;
  1668. for (i=0; i < numEntries; ++i) {
  1669. if (hdfsFileInfo[i].mName) {
  1670. free(hdfsFileInfo[i].mName);
  1671. }
  1672. }
  1673. //Free entire block
  1674. free(hdfsFileInfo);
  1675. }
  1676. /**
  1677. * vim: ts=4: sw=4: et:
  1678. */