/**
 * Copyright 2005 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hdfs.h"
#include "hdfsJniHelper.h"

/**
 * hdfsJniEnv: A wrapper struct to be used as 'value'
 * while saving thread -> JNIEnv* mappings
 */
typedef struct
{
    JNIEnv* env;
} hdfsJniEnv;

/**
 * Helpful macro to convert a pthread_t to a string
 */
#define GET_threadID(threadID, key, keySize) \
    snprintf(key, keySize, "__hdfs_threadID__%u", (unsigned)(threadID));

#define threadID_SIZE 32

#define CHECK_EXCEPTION_IN_METH_INVOC {\
    jthrowable _jException_;\
    if ((_jException_ = (*env)->ExceptionOccurred(env))) {\
        (*env)->ExceptionDescribe(env);\
        *jException = _jException_;\
        (*env)->ExceptionClear(env);\
        va_end(args);\
        return -1;\
    }\
}
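
/*
 * Note: this macro is only meaningful inside a varargs method-invocation
 * helper (presumably invokeMethod() in hdfsJniHelper.c): it assumes 'env',
 * a 'jthrowable *jException' out-parameter and a 'va_list args' are in
 * scope; it copies out any pending Java exception, prints and clears it,
 * and bails out of the enclosing function with -1.
 */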

/**
 * getJNIEnv: A helper function to get the JNIEnv* for the given thread.
 * @param: None.
 * @return The JNIEnv* corresponding to the thread.
 */
static inline JNIEnv* getJNIEnv()
{
    char threadID[threadID_SIZE];

    const jsize vmBufLength = 1;
    JavaVM* vmBuf[vmBufLength];
    JNIEnv *env;
    jint rv = 0;
    jint noVMs = 0;

    //Get the threadID and stringize it
    GET_threadID(pthread_self(), threadID, sizeof(threadID));

    //See if you already have the JNIEnv* cached...
    env = (JNIEnv*)searchEntryFromTable(threadID);
    if (env != NULL) {
        return env;
    }

    //All right... some serious work required here!
    //1. Initialize the HashTable
    //2. LOCK!
    //3. Check if any JVMs have been created here
    //      Yes: Use it (we should only have 1 VM)
    //      No: Create the JVM
    //4. UNLOCK

    hashTableInit();

    LOCK_HASH_TABLE();

    rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
    if (rv != 0) {
        fprintf(stderr,
                "Call to JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
        exit(1);
    }

    if (noVMs == 0) {
        //Get the environment variables for initializing the JVM
        char *hadoopClassPath = getenv("CLASSPATH");
        if (hadoopClassPath == NULL) {
            fprintf(stderr, "Please set the environment variable $CLASSPATH!\n");
            exit(-1);
        }
        //Allow room for the "-Djava.class.path=" prefix and the trailing NUL
        char *optHadoopClassPath =
            malloc(strlen(hadoopClassPath) + strlen("-Djava.class.path=") + 1);
        sprintf(optHadoopClassPath, "-Djava.class.path=%s", hadoopClassPath);

        //Create the VM
        JavaVMInitArgs vm_args;
        JavaVMOption options[1];
        JavaVM *vm;

        // User classes
        options[0].optionString = optHadoopClassPath;
        // Print JNI-related messages
        //options[2].optionString = "-verbose:jni";

        vm_args.version = JNI_VERSION_1_2;
        vm_args.options = options;
        vm_args.nOptions = 1;
        vm_args.ignoreUnrecognized = 1;

        rv = JNI_CreateJavaVM(&vm, (void**)&env, &vm_args);
        if (rv != 0) {
            fprintf(stderr,
                    "Call to JNI_CreateJavaVM failed with error: %d\n", rv);
            exit(1);
        }

        free(optHadoopClassPath);
    } else {
        //Attach this thread to the VM
        JavaVM* vm = vmBuf[0];
        rv = (*vm)->AttachCurrentThread(vm, (void**)&env, 0);
        if (rv != 0) {
            fprintf(stderr,
                    "Call to AttachCurrentThread failed with error: %d\n", rv);
            exit(1);
        }
    }

    //Save the threadID -> env mapping
    ENTRY e, *ep;
    //hsearch stores the key pointer itself, so the key must outlive this frame
    e.key = strdup(threadID);
    e.data = (void*)(env);
    if ((ep = hsearch(e, ENTER)) == NULL) {
        fprintf(stderr, "Call to hsearch(ENTER) failed\n");
        exit(1);
    }

    UNLOCK_HASH_TABLE();

    return env;
}
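
/*
 * Note: when no JVM exists yet, the first call into this library creates
 * one, so the process must already have CLASSPATH covering the Hadoop jars
 * and configuration directory. An illustrative (not authoritative) setup:
 *
 *   export CLASSPATH=$HADOOP_HOME/hadoop-core.jar:$HADOOP_HOME/conf
 *
 * The jar name and HADOOP_HOME layout above are assumptions; only the fact
 * that CLASSPATH must be set is checked by the code above.
 */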

/**
 * Helper function to create a java.io.File object.
 * @param env: The JNIEnv pointer.
 * @param path: The file-path for which to construct java.io.File object.
 * @return Returns a jobject on success and NULL on error.
 */
static inline jobject constructNewObjectOfJavaIOFile(JNIEnv *env, const char *path)
{
    //Construct a java.lang.String object
    jstring jPath = (*env)->NewStringUTF(env, path);

    //Construct the java.io.File object
    jthrowable jException;
    jobject jFile = constructNewObjectOfClass(env, &jException,
            "java/io/File", "(Ljava/lang/String;)V", jPath);
    if (jFile == NULL) {
        fprintf(stderr,
                "Can't construct instance of class java.io.File for %s\n",
                path);
        errno = EINTERNAL;
        return NULL;
    }

    //Destroy the java.lang.String object
    (*env)->ReleaseStringUTFChars(env, jPath,
            (*env)->GetStringUTFChars(env, jPath, 0));

    return jFile;
}

/**
 * Helper function to create a org.apache.hadoop.fs.Path object.
 * @param env: The JNIEnv pointer.
 * @param path: The file-path for which to construct org.apache.hadoop.fs.Path object.
 * @return Returns a jobject on success and NULL on error.
 */
static inline
jobject constructNewObjectOfPath(JNIEnv *env, const char *path)
{
    //Construct a java.lang.String object
    jstring jPathString = (*env)->NewStringUTF(env, path);

    //Construct the org.apache.hadoop.fs.Path object
    jthrowable jException;
    jobject jPath = constructNewObjectOfClass(env, &jException,
            "org/apache/hadoop/fs/Path", "(Ljava/lang/String;)V", jPathString);
    if (jPath == NULL) {
        fprintf(stderr,
                "Can't construct instance of class org.apache.hadoop.fs.Path for %s\n",
                path);
        errno = EINTERNAL;
        return NULL;
    }

    //Destroy the java.lang.String object
    (*env)->ReleaseStringUTFChars(env, jPathString,
            (*env)->GetStringUTFChars(env, jPathString, 0));

    return jPath;
}

/**
 * Helper function to destroy a local reference of java.lang.Object
 * @param env: The JNIEnv pointer.
 * @param jObject: The local reference of the java.lang.Object to delete.
 * @return None.
 */
static inline void destroyLocalReference(JNIEnv *env, jobject jObject)
{
    (*env)->DeleteLocalRef(env, jObject);
}

hdfsFS hdfsConnect(const char* host, tPort port)
{
    // JAVA EQUIVALENT:
    //  FileSystem fs = FileSystem.get(new Configuration());
    //  return fs;

    JNIEnv *env = 0;
    jobject jConfiguration;
    jobject jFS = NULL;
    jthrowable jException;

    //Get the JNIEnv* corresponding to current thread
    env = getJNIEnv();

    //Create the org.apache.hadoop.conf.Configuration object
    jConfiguration = constructNewObjectOfClass(env, &jException,
            "org/apache/hadoop/conf/Configuration", "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr,
                "Can't construct instance of class org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return NULL;
    }

    //Check what type of FileSystem the caller wants...
    if (host == NULL) {
        //fs = new LocalFileSystem(conf);
        jFS = constructNewObjectOfClass(env, &jException,
                "org/apache/hadoop/fs/LocalFileSystem",
                "(Lorg/apache/hadoop/conf/Configuration;)V", jConfiguration);
        if (jFS == NULL) {
            errno = EINTERNAL;
            goto done;
        }
    } else if (!strcmp(host, "default") && port == 0) {
        //fs = FileSystem::get(conf);
        if (invokeMethod(env, (RetVal*)&jFS, &jException, STATIC, NULL,
                "org/apache/hadoop/fs/FileSystem", "get",
                "(Lorg/apache/hadoop/conf/Configuration;)Lorg/apache/hadoop/fs/FileSystem;",
                jConfiguration) != 0) {
            fprintf(stderr,
                    "Call to org.apache.hadoop.fs.FileSystem::get failed!\n");
            errno = EINTERNAL;
            goto done;
        }
    } else {
        //fs = new DistributedFileSystem(new InetSocketAddress(host, port), conf);
        jstring jHostName = (*env)->NewStringUTF(env, host);

        jobject jNameNode = constructNewObjectOfClass(env, &jException,
                "java/net/InetSocketAddress", "(Ljava/lang/String;I)V",
                jHostName, port);
        (*env)->ReleaseStringUTFChars(env, jHostName,
                (*env)->GetStringUTFChars(env, jHostName, NULL));
        if (jNameNode == NULL) {
            errno = EINTERNAL;
            goto done;
        }

        jFS = constructNewObjectOfClass(env, &jException,
                "org/apache/hadoop/dfs/DistributedFileSystem",
                "(Ljava/net/InetSocketAddress;Lorg/apache/hadoop/conf/Configuration;)V",
                jNameNode, jConfiguration);
        destroyLocalReference(env, jNameNode);
        if (jFS == NULL) {
            errno = EINTERNAL;
            goto done;
        }
    }

done:
    //Release unnecessary local references
    destroyLocalReference(env, jConfiguration);

    return jFS;
}
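
/*
 * Illustrative usage of hdfsConnect (a sketch; error handling elided):
 *
 *   hdfsFS lfs = hdfsConnect(NULL, 0);       // local filesystem
 *   hdfsFS fs  = hdfsConnect("default", 0);  // configured default filesystem
 *   hdfsFS dfs = hdfsConnect("namenode-host", 9000); // explicit namenode
 *   ...
 *   hdfsDisconnect(fs);
 *
 * "namenode-host" and 9000 are placeholder values for illustration, not
 * defaults of this library.
 */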

int hdfsDisconnect(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.close()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;

    //jException reference
    jthrowable jException;

    //Sanity check
    if (fs == NULL) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "close", "()V") != 0) {
        fprintf(stderr, "Call to FileSystem::close failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    //Release unnecessary local references
    destroyLocalReference(env, jFS);

    return 0;
}

hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
        int bufferSize, short replication, tSize blockSize)
{
    // JAVA EQUIVALENT:
    //  File f = new File(path);
    //  FSData{Input|Output}Stream f{is|os} = fs.create(f);
    //  return f{is|os};

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //The hadoop java api/signature
    const char* method = (flags == O_RDONLY) ? "open" : "create";
    const char* signature = (flags == O_RDONLY) ?
        "(Lorg/apache/hadoop/fs/Path;I)Lorg/apache/hadoop/fs/FSDataInputStream;" :
        "(Lorg/apache/hadoop/fs/Path;ZISJ)Lorg/apache/hadoop/fs/FSDataOutputStream;";

    //Return value
    hdfsFile file = NULL;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    //Create the org.apache.hadoop.conf.Configuration object
    //and get the configured values if need be
    jobject jConfiguration = constructNewObjectOfClass(env, &jException,
            "org/apache/hadoop/conf/Configuration", "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr,
                "Can't construct instance of class org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return NULL;
    }

    jint jBufferSize = bufferSize;
    jshort jReplication = replication;
    jlong jBlockSize = blockSize;
    jstring jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
    jstring jStrReplication = (*env)->NewStringUTF(env, "dfs.replication");
    jstring jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");

    //bufferSize
    if (!bufferSize) {
        if (invokeMethod(env, (RetVal*)&jBufferSize, &jException, INSTANCE,
                jConfiguration, "org/apache/hadoop/conf/Configuration", "getInt",
                "(Ljava/lang/String;I)I", jStrBufferSize, 4096)) {
            fprintf(stderr,
                    "Call to org.apache.hadoop.conf.Configuration::getInt failed!\n");
            errno = EINTERNAL;
            goto done;
        }
    }

    if (flags == O_WRONLY) {
        //replication
        jint jTmpReplication;
        if (!replication) {
            if (invokeMethod(env, (RetVal*)&jTmpReplication, &jException, INSTANCE,
                    jConfiguration, "org/apache/hadoop/conf/Configuration", "getInt",
                    "(Ljava/lang/String;I)I", jStrReplication, 1)) {
                fprintf(stderr,
                        "Call to org.apache.hadoop.conf.Configuration::getInt failed!\n");
                errno = EINTERNAL;
                goto done;
            }
            jReplication = jTmpReplication;
        }

        //blockSize
        if (!blockSize) {
            if (invokeMethod(env, (RetVal*)&jBlockSize, &jException, INSTANCE,
                    jConfiguration, "org/apache/hadoop/conf/Configuration", "getLong",
                    "(Ljava/lang/String;J)J", jStrBlockSize, 67108864)) {
                fprintf(stderr,
                        "Call to org.apache.hadoop.conf.Configuration::getLong failed!\n");
                errno = EINTERNAL;
                goto done;
            }
        }
    }

    //Create and return either the FSDataInputStream or FSDataOutputStream references
    jobject jStream;
    if (flags == O_RDONLY) {
        if (invokeMethod(env, (RetVal*)&jStream, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem",
                method, signature, jPath, jBufferSize)) {
            fprintf(stderr,
                    "Call to org.apache.hadoop.fs.FileSystem::%s(%s) failed!\n",
                    method, signature);
            errno = EINTERNAL;
            goto done;
        }
    } else {
        jboolean jOverWrite = 1;
        if (invokeMethod(env, (RetVal*)&jStream, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem",
                method, signature, jPath, jOverWrite,
                jBufferSize, jReplication, jBlockSize)) {
            fprintf(stderr,
                    "Call to org.apache.hadoop.fs.FileSystem::%s(%s) failed!\n",
                    method, signature);
            errno = EINTERNAL;
            goto done;
        }
    }

    file = malloc(sizeof(struct hdfsFile_internal));
    if (!file) {
        errno = ENOMEM;
        goto done;
    }
    file->file = (void*)jStream;
    //O_RDONLY is 0 on POSIX, so test with '==' rather than '&'
    file->type = ((flags == O_RDONLY) ? INPUT : OUTPUT);

done:
    //Delete unnecessary local references
    (*env)->ReleaseStringUTFChars(env, jStrBufferSize,
            (*env)->GetStringUTFChars(env, jStrBufferSize, 0));
    (*env)->ReleaseStringUTFChars(env, jStrReplication,
            (*env)->GetStringUTFChars(env, jStrReplication, 0));
    (*env)->ReleaseStringUTFChars(env, jStrBlockSize,
            (*env)->GetStringUTFChars(env, jStrBlockSize, 0));
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jPath);

    return file;
}
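
/*
 * Illustrative usage of hdfsOpenFile (a sketch): passing 0 for bufferSize,
 * replication and blockSize makes the Configuration values resolved above
 * take effect.
 *
 *   hdfsFile out = hdfsOpenFile(fs, "/tmp/test.txt", O_WRONLY, 0, 0, 0);
 *   hdfsFile in  = hdfsOpenFile(fs, "/tmp/test.txt", O_RDONLY, 0, 0, 0);
 *   ...
 *   hdfsCloseFile(fs, out);
 *   hdfsCloseFile(fs, in);
 */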

int hdfsCloseFile(hdfsFS fs, hdfsFile file)
{
    // JAVA EQUIVALENT:
    //  file.close

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jobject jStream = (jobject)(file ? file->file : NULL);

    //jException reference
    jthrowable jException;

    //Sanity check
    if (!file || file->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //The interface whose 'close' method is to be called
    const char* interface = (file->type == INPUT) ?
        "org/apache/hadoop/fs/FSDataInputStream" :
        "org/apache/hadoop/fs/FSDataOutputStream";

    if (invokeMethod(env, NULL, &jException, INSTANCE, jStream, interface,
            "close", "()V") != 0) {
        fprintf(stderr, "Call to %s::close failed!\n", interface);
        errno = EINTERNAL;
        return -1;
    }

    //De-allocate memory
    free(file);

    return 0;
}

tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
{
    // JAVA EQUIVALENT:
    //  byte [] bR = new byte[length];
    //  fis.read(bR);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jobject jInputStream = (jobject)(f ? f->file : NULL);
    jthrowable jException;
    jbyteArray jbRarray;
    jint noReadBytes = 0;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //Error checking... make sure that this file is 'readable'
    if (f->type != INPUT) {
        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    //Read the requisite bytes
    jbRarray = (*env)->NewByteArray(env, length);
    if (invokeMethod(env, (RetVal*)&noReadBytes, &jException, INSTANCE,
            jInputStream, "org/apache/hadoop/fs/FSDataInputStream",
            "read", "([B)I", jbRarray) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FSDataInputStream::read failed!\n");
        errno = EINTERNAL;
        noReadBytes = -1;
    } else {
        if (noReadBytes > 0) {
            (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
        }
        //This is a valid case: there aren't any bytes left to read!
        errno = 0;
    }

    (*env)->ReleaseByteArrayElements(env, jbRarray,
            (*env)->GetByteArrayElements(env, jbRarray, 0), JNI_ABORT);

    return noReadBytes;
}
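
/*
 * Illustrative read loop (a sketch): like its Java counterpart, hdfsRead may
 * return fewer bytes than requested, and a negative return with errno == 0
 * simply means end-of-stream here.
 *
 *   char buf[4096];
 *   tSize n;
 *   while ((n = hdfsRead(fs, in, buf, sizeof(buf))) > 0) {
 *       // consume n bytes from buf
 *   }
 */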

tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
{
    // JAVA EQUIVALENT
    //  byte b[] = str.getBytes();
    //  fso.write(b);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jobject jOutputStream = (jobject)(f ? f->file : 0);
    jthrowable jException;
    jbyteArray jbWarray;
    jint noWrittenBytes = 0;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //Error checking... make sure that this file is 'writable'
    if (f->type != OUTPUT) {
        fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    //Write the requisite bytes into the file
    jbWarray = (*env)->NewByteArray(env, length);
    (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
    if (invokeMethod(env, NULL, &jException, INSTANCE, jOutputStream,
            "org/apache/hadoop/fs/FSDataOutputStream", "write",
            "([B)V", jbWarray)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FSDataOutputStream::write failed!\n");
        errno = EINTERNAL;
        noWrittenBytes = -1;
    } else {
        //The void write([B) either writes everything or throws, so report
        //'length' itself (the libc way)
        noWrittenBytes = length;
    }

    (*env)->ReleaseByteArrayElements(env, jbWarray,
            (*env)->GetByteArrayElements(env, jbWarray, 0), JNI_ABORT);

    //Return no. of bytes successfully written, or -1 on error
    return noWrittenBytes;
}
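
/*
 * Illustrative usage of hdfsWrite (a sketch):
 *
 *   const char* msg = "hello, libhdfs";
 *   if (hdfsWrite(fs, out, msg, strlen(msg)) == -1) {
 *       // errno has been set to EINTERNAL
 *   }
 */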

int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
{
    // JAVA EQUIVALENT
    //  fis.seek(pos);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jobject jInputStream = (jobject)(f ? f->file : 0);
    jthrowable jException;

    //Sanity check
    if (!f || f->type != INPUT) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jException, INSTANCE, jInputStream,
            "org/apache/hadoop/fs/FSDataInputStream", "seek",
            "(J)V", desiredPos) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FSDataInputStream::seek failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    return 0;
}

tOffset hdfsTell(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  pos = f.getPos();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jobject jStream = (jobject)(f ? f->file : 0);
    jthrowable jException;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //The interface whose 'getPos' method is to be called
    const char* interface = (f->type == INPUT) ?
        "org/apache/hadoop/fs/FSDataInputStream" :
        "org/apache/hadoop/fs/FSDataOutputStream";

    jlong currentPos = -1;
    if (invokeMethod(env, (RetVal*)&currentPos, &jException, INSTANCE,
            jStream, interface, "getPos", "()J") != 0) {
        fprintf(stderr, "Call to %s::getPos failed!\n", interface);
        errno = EINTERNAL;
        return -1;
    }

    return (tOffset)currentPos;
}

int hdfsFlush(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  fos.flush();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jobject jOutputStream = (jobject)(f ? f->file : 0);
    jthrowable jException;

    //Sanity check
    if (!f || f->type != OUTPUT) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jException, INSTANCE, jOutputStream,
            "org/apache/hadoop/fs/FSDataOutputStream", "flush",
            "()V") != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FSDataOutputStream::flush failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    return 0;
}

int hdfsAvailable(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  fis.available();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jobject jInputStream = (jobject)(f ? f->file : 0);
    jthrowable jException;

    //Sanity check
    if (!f || f->type != INPUT) {
        errno = EBADF;
        return -1;
    }

    jint available = -1;
    if (invokeMethod(env, (RetVal*)&available, &jException, INSTANCE,
            jInputStream, "org/apache/hadoop/fs/FSDataInputStream",
            "available", "()I") != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FSDataInputStream::available failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    return available;
}

int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath, deleteSource = false, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;
    jobject jSrcPath = constructNewObjectOfPath(env, src);
    jobject jDstPath = constructNewObjectOfPath(env, dst);
    if (jSrcPath == NULL || jDstPath == NULL) {
        return -1;
    }

    jthrowable jException;
    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration = constructNewObjectOfClass(env, &jException,
            "org/apache/hadoop/conf/Configuration", "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr,
                "Can't construct instance of class org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 0; //Only copy
    jboolean jRetVal = 0;
    if (invokeMethod(env, (RetVal*)&jRetVal, &jException, STATIC,
            NULL, "org/apache/hadoop/fs/FileUtil", "copy",
            "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
            jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
            jConfiguration) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileUtil::copy failed!\n");
        errno = EINTERNAL;
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}

int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath, deleteSource = true, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;
    jobject jSrcPath = constructNewObjectOfPath(env, src);
    jobject jDstPath = constructNewObjectOfPath(env, dst);
    if (jSrcPath == NULL || jDstPath == NULL) {
        return -1;
    }

    jthrowable jException;
    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration = constructNewObjectOfClass(env, &jException,
            "org/apache/hadoop/conf/Configuration", "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr,
                "Can't construct instance of class org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 1; //Delete src after copy
    jboolean jRetVal = 0;
    if (invokeMethod(env, (RetVal*)&jRetVal, &jException, STATIC,
            NULL, "org/apache/hadoop/fs/FileUtil", "copy",
            "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
            jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
            jConfiguration) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileUtil::copy(move) failed!\n");
        errno = EINTERNAL;
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}

int hdfsDelete(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  File f = new File(path);
    //  bool retval = fs.delete(f);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Delete the file
    jboolean retval = 1;
    if (invokeMethod(env, (RetVal*)&retval, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "delete",
            "(Lorg/apache/hadoop/fs/Path;)Z", jPath)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::delete failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (retval) ? 0 : -1;
}

int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
{
    // JAVA EQUIVALENT:
    //  Path old = new Path(oldPath);
    //  Path new = new Path(newPath);
    //  fs.rename(old, new);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create objects of org.apache.hadoop.fs.Path
    jobject jOldPath = constructNewObjectOfPath(env, oldPath);
    jobject jNewPath = constructNewObjectOfPath(env, newPath);
    if (jOldPath == NULL || jNewPath == NULL) {
        return -1;
    }

    //Rename the file
    jboolean retval = 1;
    if (invokeMethod(env, (RetVal*)&retval, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "rename",
            "(Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/Path;)Z",
            jOldPath, jNewPath)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::rename failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jOldPath);
    destroyLocalReference(env, jNewPath);

    return (retval) ? 0 : -1;
}

int hdfsLock(hdfsFS fs, const char* path, int shared)
{
    // JAVA EQUIVALENT:
    //  Path p = new Path(path);
    //  fs.lock(p);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jboolean jb_shared = shared;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Lock the file
    int retval = 0;
    if (invokeMethod(env, NULL, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "lock",
            "(Lorg/apache/hadoop/fs/Path;Z)V", jPath, jb_shared)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::lock failed!\n");
        errno = EINTERNAL;
        retval = -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return retval;
}

int hdfsReleaseLock(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  Path f = new Path(path);
    //  fs.release(f);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Release the lock on the file
    int retval = 0;
    if (invokeMethod(env, NULL, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "release",
            "(Lorg/apache/hadoop/fs/Path;)V", jPath)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::release failed!\n");
        errno = EINTERNAL;
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return retval;
}

char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
{
    // JAVA EQUIVALENT:
    //  Path p = fs.getWorkingDirectory();
    //  return p.toString()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jobject jPath = NULL;
    jthrowable jException;

    //FileSystem::getWorkingDirectory()
    if (invokeMethod(env, (RetVal*)&jPath, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "getWorkingDirectory",
            "()Lorg/apache/hadoop/fs/Path;") || jPath == NULL) {
        fprintf(stderr, "Call to FileSystem::getWorkingDirectory failed!\n");
        errno = EINTERNAL;
        return NULL;
    }

    //Path::toString()
    jstring jPathString;
    if (invokeMethod(env, (RetVal*)&jPathString, &jException, INSTANCE, jPath,
            "org/apache/hadoop/fs/Path", "toString",
            "()Ljava/lang/String;")) {
        fprintf(stderr, "Call to Path::toString failed!\n");
        errno = EINTERNAL;
        destroyLocalReference(env, jPath);
        return NULL;
    }

    //Copy to user-provided buffer
    strncpy(buffer, (char*)(*env)->GetStringUTFChars(env, jPathString, NULL),
            bufferSize);

    //Delete unnecessary local references
    (*env)->ReleaseStringUTFChars(env, jPathString,
            (*env)->GetStringUTFChars(env, jPathString, NULL));
    destroyLocalReference(env, jPath);

    return buffer;
}

int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  fs.setWorkingDirectory(Path(path));

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;
    int retval = 0;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //FileSystem::setWorkingDirectory()
    if (invokeMethod(env, NULL, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "setWorkingDirectory",
            "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
        fprintf(stderr, "Call to FileSystem::setWorkingDirectory failed!\n");
        errno = EINTERNAL;
        retval = -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return retval;
}

int hdfsCreateDirectory(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  fs.mkdirs(new Path(path));

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Create the directory
    jboolean jRetVal = 0;
    if (invokeMethod(env, (RetVal*)&jRetVal, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "mkdirs",
            "(Lorg/apache/hadoop/fs/Path;)Z", jPath)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::mkdirs failed!\n");
        errno = EINTERNAL;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (jRetVal) ? 0 : -1;
}

char*** hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
{
    // JAVA EQUIVALENT:
    //  fs.getFileCacheHints(new Path(path), start, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    //org.apache.hadoop.fs.FileSystem::getFileCacheHints
    char*** blockHosts = NULL;
    jobjectArray jFileCacheHints;
    if (invokeMethod(env, (RetVal*)&jFileCacheHints, &jException, INSTANCE,
            jFS, "org/apache/hadoop/fs/FileSystem", "getFileCacheHints",
            "(Lorg/apache/hadoop/fs/Path;JJ)[[Ljava/lang/String;", jPath,
            start, length)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::getFileCacheHints failed!\n");
        errno = EINTERNAL;
        goto done;
    }

    //Figure out no of entries in jFileCacheHints
    //Allocate memory and add NULL at the end
    jsize jNumFileBlocks = (*env)->GetArrayLength(env, jFileCacheHints);
    blockHosts = malloc(sizeof(char**) * (jNumFileBlocks+1));
    if (blockHosts == NULL) {
        errno = ENOMEM;
        goto done;
    }
    blockHosts[jNumFileBlocks] = NULL;
    if (jNumFileBlocks == 0) {
        errno = 0;
        goto done;
    }

    //Now parse each block to get hostnames
    int i = 0;
    for (i = 0; i < jNumFileBlocks; ++i) {
        jobjectArray jFileBlockHosts = (*env)->GetObjectArrayElement(env,
                jFileCacheHints, i);

        //Figure out no of hosts for this block
        //Allocate memory and add NULL at the end
        jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
        blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts+1));
        if (blockHosts[i] == NULL) {
            int x = 0;
            for (x = 0; x < i; ++x) {
                free(blockHosts[x]);
            }
            free(blockHosts);
            //Don't hand back a pointer to freed memory
            blockHosts = NULL;
            errno = ENOMEM;
            goto done;
        }
        blockHosts[i][jNumBlockHosts] = NULL;

        //Now parse each hostname
        int j = 0;
        for (j = 0; j < jNumBlockHosts; ++j) {
            jstring jHost = (*env)->GetObjectArrayElement(env,
                    jFileBlockHosts, j);
            blockHosts[i][j] = strdup((char*)(*env)->GetStringUTFChars(env,
                    jHost, NULL));
            (*env)->ReleaseStringUTFChars(env, jHost,
                    (*env)->GetStringUTFChars(env, jHost, NULL));
        }
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return blockHosts;
}
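
/*
 * Illustrative traversal of the hdfsGetHosts result (a sketch): both the
 * outer per-block array and each inner hostname array are NULL-terminated,
 * and every array and string was allocated above, so the caller owns (and
 * must free) all of it. 'fileLength' below is a placeholder value.
 *
 *   char*** hosts = hdfsGetHosts(fs, "/tmp/test.txt", 0, fileLength);
 *   int b, h;
 *   for (b = 0; hosts && hosts[b]; ++b) {
 *       for (h = 0; hosts[b][h]; ++h) {
 *           printf("block %d replica on %s\n", b, hosts[b][h]);
 *           free(hosts[b][h]);
 *       }
 *       free(hosts[b]);
 *   }
 *   if (hosts) free(hosts);
 */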

tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getDefaultBlockSize();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //FileSystem::getDefaultBlockSize()
    tOffset blockSize = -1;
    if (invokeMethod(env, (RetVal*)&blockSize, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "getDefaultBlockSize",
            "()J") != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::getDefaultBlockSize failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    return blockSize;
}

tOffset hdfsGetCapacity(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getRawCapacity();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    if (!((*env)->IsInstanceOf(env, jFS,
            globalClassReference("org/apache/hadoop/dfs/DistributedFileSystem",
                env)))) {
        fprintf(stderr,
                "hdfsGetCapacity works only on a DistributedFileSystem!\n");
        return -1;
    }

    //DistributedFileSystem::getRawCapacity()
    tOffset rawCapacity = -1;
    if (invokeMethod(env, (RetVal*)&rawCapacity, &jException, INSTANCE, jFS,
            "org/apache/hadoop/dfs/DistributedFileSystem",
            "getRawCapacity", "()J") != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.dfs.DistributedFileSystem::getRawCapacity failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    return rawCapacity;
}

tOffset hdfsGetUsed(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getRawUsed();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    if (!((*env)->IsInstanceOf(env, jFS,
            globalClassReference("org/apache/hadoop/dfs/DistributedFileSystem",
                env)))) {
        fprintf(stderr,
                "hdfsGetUsed works only on a DistributedFileSystem!\n");
        return -1;
    }

    //DistributedFileSystem::getRawUsed()
    tOffset rawUsed = -1;
    if (invokeMethod(env, (RetVal*)&rawUsed, &jException, INSTANCE, jFS,
            "org/apache/hadoop/dfs/DistributedFileSystem", "getRawUsed",
            "()J") != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.dfs.DistributedFileSystem::getRawUsed failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    return rawUsed;
}

static int getFileInfo(JNIEnv *env, jobject jFS, jobject jPath,
        hdfsFileInfo *fileInfo)
{
    // JAVA EQUIVALENT:
    //  fs.isDirectory(f)
    //  fs.lastModified() ??
    //  fs.getLength(f)
    //  f.getPath()

    jthrowable jException;

    jboolean jIsDir;
    if (invokeMethod(env, (RetVal*)&jIsDir, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "isDirectory",
            "(Lorg/apache/hadoop/fs/Path;)Z", jPath) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::isDirectory failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    /*
    jlong jModTime = 0;
    if (invokeMethod(env, (RetVal*)&jModTime, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "lastModified",
            "(Lorg/apache/hadoop/fs/Path;)J", jPath) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::lastModified failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    */

    jlong jFileLength = 0;
    if (!jIsDir) {
        if (invokeMethod(env, (RetVal*)&jFileLength, &jException, INSTANCE,
                jFS, "org/apache/hadoop/fs/FileSystem", "getLength",
                "(Lorg/apache/hadoop/fs/Path;)J", jPath) != 0) {
            fprintf(stderr,
                    "Call to org.apache.hadoop.fs.FileSystem::getLength failed!\n");
            errno = EINTERNAL;
            return -1;
        }
    }

    jstring jPathName;
    if (invokeMethod(env, (RetVal*)&jPathName, &jException, INSTANCE, jPath,
            "org/apache/hadoop/fs/Path", "toString",
            "()Ljava/lang/String;")) {
        fprintf(stderr, "Call to org.apache.hadoop.fs.Path::toString failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    fileInfo->mKind = (jIsDir ? kObjectKindDirectory : kObjectKindFile);
    //fileInfo->mCreationTime = jModTime;
    fileInfo->mSize = jFileLength;
    fileInfo->mName = strdup((char*)(*env)->GetStringUTFChars(env,
            jPathName, NULL));
    (*env)->ReleaseStringUTFChars(env, jPathName,
            (*env)->GetStringUTFChars(env, jPathName, NULL));

    return 0;
}

hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
{
    // JAVA EQUIVALENT:
    //  Path p(path);
    //  Path []pathList = fs.listPaths(p)
    //  foreach path in pathList
    //    getFileInfo(path)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    hdfsFileInfo *pathList = 0;

    jobjectArray jPathList;
    if (invokeMethod(env, (RetVal*)&jPathList, &jException, INSTANCE, jFS,
            "org/apache/hadoop/fs/FileSystem", "listPaths",
            "(Lorg/apache/hadoop/fs/Path;)[Lorg/apache/hadoop/fs/Path;",
            jPath) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::listPaths failed!\n");
        errno = EINTERNAL;
        goto done;
    }

    //Figure out no of entries in that directory
    jsize jPathListSize = (*env)->GetArrayLength(env, jPathList);
    *numEntries = jPathListSize;
    if (jPathListSize == 0) {
        errno = 0;
        goto done;
    }

    //Allocate memory
    pathList = malloc(sizeof(hdfsFileInfo) * jPathListSize);
    if (pathList == NULL) {
        errno = ENOMEM;
        goto done;
    }

    //Save path information in pathList
    jsize i;
    for (i = 0; i < jPathListSize; ++i) {
        if (getFileInfo(env, jFS, (*env)->GetObjectArrayElement(env,
                jPathList, i), &pathList[i])) {
            errno = EINTERNAL;
            free(pathList);
            //Don't hand back a pointer to freed memory
            pathList = NULL;
            goto done;
        }
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return pathList;
}
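
/*
 * Illustrative usage of hdfsListDirectory (a sketch), paired with
 * hdfsFreeFileInfo below to release the entries:
 *
 *   int numEntries = 0;
 *   hdfsFileInfo* info = hdfsListDirectory(fs, "/tmp", &numEntries);
 *   int i;
 *   for (i = 0; i < numEntries; ++i) {
 *       printf("%s (%s, %ld bytes)\n", info[i].mName,
 *              (info[i].mKind == kObjectKindDirectory) ? "dir" : "file",
 *              (long)info[i].mSize);
 *   }
 *   hdfsFreeFileInfo(info, numEntries);
 */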

hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  File f(path);
    //  fs.isDirectory(f)
    //  fs.lastModified() ??
    //  fs.getLength(f)
    //  f.getPath()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    hdfsFileInfo *fileInfo = malloc(sizeof(hdfsFileInfo));
    if (fileInfo == NULL) {
        errno = ENOMEM;
        goto done;
    }
    bzero(fileInfo, sizeof(hdfsFileInfo));
    if (getFileInfo(env, jFS, jPath, fileInfo)) {
        hdfsFreeFileInfo(fileInfo, 1);
        fileInfo = NULL;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return fileInfo;
}

void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
{
    //Free the mName
    int i;
    for (i = 0; i < numEntries; ++i) {
        if (hdfsFileInfo[i].mName) {
            free(hdfsFileInfo[i].mName);
        }
    }

    //Free entire block
    free(hdfsFileInfo);
}

jobject hdfsConvertToGlobalRef(jobject localRef)
{
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Create the global reference
    jobject globalRef = (*env)->NewGlobalRef(env, localRef);
    if (globalRef == NULL) {
        (*env)->ExceptionDescribe(env);
        return NULL;
    }

    //Destroy the local reference (not the global one we just created!)
    (*env)->DeleteLocalRef(env, localRef);

    return globalRef;
}
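
/*
 * Note: a JNI local reference is only valid in the thread and stack frame
 * that created it, so a caller that caches an hdfsFS/hdfsFile across
 * threads presumably promotes it with hdfsConvertToGlobalRef and later
 * releases it via hdfsDeleteGlobalRef below.
 */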

void hdfsDeleteGlobalRef(jobject globalRef)
{
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Destroy the global reference
    (*env)->DeleteGlobalRef(env, globalRef);
}

/**
 * vim: ts=4: sw=4: et:
 */