();
- for (Object obj : selection.toList()) {
- if (clazz.isAssignableFrom(obj.getClass())) {
- list.add((T) obj);
- }
- }
- return list;
- }
-
- private static int computeUploadWork(File file) {
- if (file.isDirectory()) {
- int contentWork = 1;
- for (File child : file.listFiles())
- contentWork += computeUploadWork(child);
- return contentWork;
-
- } else if (file.isFile()) {
- return 1 + (int) (file.length() / 1024);
-
- } else {
- return 0;
- }
- }
-
-}
-
-/**
- * Adapter to allow the viewing of a DfsFile in the Editor window
- */
-class DFSFileEditorInput extends PlatformObject implements
- IStorageEditorInput {
-
- private DFSFile file;
-
- /**
- * Constructor
- *
- * @param file
- */
- DFSFileEditorInput(DFSFile file) {
- this.file = file;
- }
-
- /* @inheritDoc */
- public String getToolTipText() {
- return file.toDetailedString();
- }
-
- /* @inheritDoc */
- public IPersistableElement getPersistable() {
- return null;
- }
-
- /* @inheritDoc */
- public String getName() {
- return file.toString();
- }
-
- /* @inheritDoc */
- public ImageDescriptor getImageDescriptor() {
- return ImageLibrary.get("dfs.file.editor");
- }
-
- /* @inheritDoc */
- public boolean exists() {
- return true;
- }
-
- /* @inheritDoc */
- public IStorage getStorage() throws CoreException {
- return file.getIStorage();
- }
-};
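
The computeUploadWork() helper above budgets a progress monitor: one unit per file or directory entry plus one unit per KiB of file content, which lines up with the monitor.worked(1) call made per 1024-byte buffer in DFSFile.upload() later in this patch. A minimal standalone sketch of the same accounting follows; the class name and main harness are illustrative only, and unlike the original loop it also tolerates listFiles() returning null.

import java.io.File;

// Standalone sketch of the upload progress accounting: one unit per file or
// directory entry, plus one unit per KiB of file content, so the upload loop
// can report monitor.worked(1) for each 1024-byte buffer it writes.
public class UploadWorkEstimator {

    static int computeUploadWork(File file) {
        if (file.isDirectory()) {
            int contentWork = 1;                     // one unit for creating the directory itself
            File[] children = file.listFiles();
            if (children != null) {                  // listFiles() returns null on unreadable dirs
                for (File child : children) {
                    contentWork += computeUploadWork(child);
                }
            }
            return contentWork;
        } else if (file.isFile()) {
            return 1 + (int) (file.length() / 1024); // one unit per KiB transferred
        } else {
            return 0;                                // neither file nor directory
        }
    }

    public static void main(String[] args) {
        File root = new File(args.length > 0 ? args[0] : ".");
        System.out.println("upload work units: " + computeUploadWork(root));
    }
}
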
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/EditLocationAction.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/EditLocationAction.java
deleted file mode 100644
index cdfbe93474d..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/EditLocationAction.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.actions;
-
-import org.apache.hadoop.eclipse.ImageLibrary;
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.apache.hadoop.eclipse.servers.HadoopLocationWizard;
-import org.apache.hadoop.eclipse.view.servers.ServerView;
-import org.eclipse.jface.action.Action;
-import org.eclipse.jface.wizard.Wizard;
-import org.eclipse.jface.wizard.WizardDialog;
-
-/**
- * Editing server properties action
- */
-public class EditLocationAction extends Action {
-
- private ServerView serverView;
-
- public EditLocationAction(ServerView serverView) {
- this.serverView = serverView;
-
- setText("Edit Hadoop location...");
- setImageDescriptor(ImageLibrary.get("server.view.action.location.edit"));
- }
-
- @Override
- public void run() {
-
- final HadoopServer server = serverView.getSelectedServer();
- if (server == null)
- return;
-
- WizardDialog dialog = new WizardDialog(null, new Wizard() {
- private HadoopLocationWizard page = new HadoopLocationWizard(server);
-
- @Override
- public void addPages() {
- super.addPages();
- setWindowTitle("Edit Hadoop location...");
- addPage(page);
- }
-
- @Override
- public boolean performFinish() {
- page.performFinish();
- return true;
- }
- });
-
- dialog.create();
- dialog.setBlockOnOpen(true);
- dialog.open();
-
- super.run();
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/NewLocationAction.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/NewLocationAction.java
deleted file mode 100644
index 5db0bc56daa..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/NewLocationAction.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.actions;
-
-import org.apache.hadoop.eclipse.ImageLibrary;
-import org.apache.hadoop.eclipse.servers.HadoopLocationWizard;
-import org.eclipse.jface.action.Action;
-import org.eclipse.jface.wizard.Wizard;
-import org.eclipse.jface.wizard.WizardDialog;
-
-
-/**
- * Action corresponding to creating a new MapReduce Server.
- */
-
-public class NewLocationAction extends Action {
- public NewLocationAction() {
- setText("New Hadoop location...");
- setImageDescriptor(ImageLibrary.get("server.view.action.location.new"));
- }
-
- @Override
- public void run() {
- WizardDialog dialog = new WizardDialog(null, new Wizard() {
- private HadoopLocationWizard page = new HadoopLocationWizard();
-
- @Override
- public void addPages() {
- super.addPages();
- setWindowTitle("New Hadoop location...");
- addPage(page);
- }
-
- @Override
- public boolean performFinish() {
- page.performFinish();
- return true;
- }
-
- });
-
- dialog.create();
- dialog.setBlockOnOpen(true);
- dialog.open();
-
- super.run();
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRClassWizardAction.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRClassWizardAction.java
deleted file mode 100644
index cc1f9ecb6cf..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRClassWizardAction.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.actions;
-
-import java.util.logging.Logger;
-
-import org.apache.hadoop.eclipse.NewDriverWizard;
-import org.apache.hadoop.eclipse.NewMapperWizard;
-import org.apache.hadoop.eclipse.NewReducerWizard;
-import org.eclipse.jface.action.Action;
-import org.eclipse.jface.viewers.StructuredSelection;
-import org.eclipse.jface.window.Window;
-import org.eclipse.jface.wizard.WizardDialog;
-import org.eclipse.ui.INewWizard;
-import org.eclipse.ui.IWorkbench;
-import org.eclipse.ui.PlatformUI;
-import org.eclipse.ui.cheatsheets.ICheatSheetAction;
-import org.eclipse.ui.cheatsheets.ICheatSheetManager;
-
-
-/**
- * Action to open a new MapReduce Class.
- */
-
-public class OpenNewMRClassWizardAction extends Action implements
- ICheatSheetAction {
-
- static Logger log = Logger.getLogger(OpenNewMRClassWizardAction.class
- .getName());
-
- public void run(String[] params, ICheatSheetManager manager) {
-
- if ((params != null) && (params.length > 0)) {
- IWorkbench workbench = PlatformUI.getWorkbench();
- INewWizard wizard = getWizard(params[0]);
- wizard.init(workbench, new StructuredSelection());
- WizardDialog dialog = new WizardDialog(PlatformUI.getWorkbench()
- .getActiveWorkbenchWindow().getShell(), wizard);
- dialog.create();
- dialog.open();
-
- // did the wizard succeed ?
- notifyResult(dialog.getReturnCode() == Window.OK);
- }
- }
-
- private INewWizard getWizard(String typeName) {
- if (typeName.equals("Mapper")) {
- return new NewMapperWizard();
- } else if (typeName.equals("Reducer")) {
- return new NewReducerWizard();
- } else if (typeName.equals("Driver")) {
- return new NewDriverWizard();
- } else {
- log.severe("Invalid Wizard requested");
- return null;
- }
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRProjectAction.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRProjectAction.java
deleted file mode 100644
index c7fde10c09a..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRProjectAction.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.actions;
-
-import org.apache.hadoop.eclipse.NewMapReduceProjectWizard;
-import org.eclipse.jface.action.Action;
-import org.eclipse.jface.viewers.StructuredSelection;
-import org.eclipse.jface.window.Window;
-import org.eclipse.jface.wizard.WizardDialog;
-import org.eclipse.swt.widgets.Shell;
-import org.eclipse.ui.IWorkbench;
-import org.eclipse.ui.PlatformUI;
-
-/**
- * Action to open a new Map/Reduce project.
- */
-
-public class OpenNewMRProjectAction extends Action {
-
- @Override
- public void run() {
- IWorkbench workbench = PlatformUI.getWorkbench();
- Shell shell = workbench.getActiveWorkbenchWindow().getShell();
- NewMapReduceProjectWizard wizard = new NewMapReduceProjectWizard();
- wizard.init(workbench, new StructuredSelection());
- WizardDialog dialog = new WizardDialog(shell, wizard);
- dialog.create();
- dialog.open();
- // did the wizard succeed?
- notifyResult(dialog.getReturnCode() == Window.OK);
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ActionProvider.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ActionProvider.java
deleted file mode 100644
index 65436ac106a..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ActionProvider.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-import org.apache.hadoop.eclipse.ImageLibrary;
-import org.apache.hadoop.eclipse.actions.DFSActionImpl;
-import org.eclipse.jface.action.Action;
-import org.eclipse.jface.action.IMenuManager;
-import org.eclipse.jface.resource.ImageDescriptor;
-import org.eclipse.jface.viewers.ISelection;
-import org.eclipse.jface.viewers.IStructuredSelection;
-import org.eclipse.ui.IActionBars;
-import org.eclipse.ui.PlatformUI;
-import org.eclipse.ui.actions.ActionFactory;
-import org.eclipse.ui.navigator.CommonActionProvider;
-import org.eclipse.ui.navigator.ICommonActionConstants;
-import org.eclipse.ui.navigator.ICommonActionExtensionSite;
-import org.eclipse.ui.navigator.ICommonMenuConstants;
-
-/**
- * Allows the user to delete and refresh items in the DFS tree
- */
-
-public class ActionProvider extends CommonActionProvider {
-
- private static ICommonActionExtensionSite site;
-
- public ActionProvider() {
- }
-
- /* @inheritDoc */
- @Override
- public void init(ICommonActionExtensionSite site) {
- if (ActionProvider.site != null) {
- System.err.printf("%s: Multiple init()\n", this.getClass()
- .getCanonicalName());
- return;
- }
- super.init(site);
- ActionProvider.site = site;
- }
-
- /* @inheritDoc */
- @Override
- public void fillActionBars(IActionBars actionBars) {
- actionBars.setGlobalActionHandler(ActionFactory.DELETE.getId(),
- new DFSAction(DFSActions.DELETE));
- actionBars.setGlobalActionHandler(ActionFactory.REFRESH.getId(),
- new DFSAction(DFSActions.REFRESH));
-
- if (site == null)
- return;
-
- if ((site.getStructuredViewer().getSelection() instanceof IStructuredSelection)
- && (((IStructuredSelection) site.getStructuredViewer()
- .getSelection()).size() == 1)
- && (((IStructuredSelection) site.getStructuredViewer()
- .getSelection()).getFirstElement() instanceof DFSFile)) {
-
- actionBars.setGlobalActionHandler(ICommonActionConstants.OPEN,
- new DFSAction(DFSActions.OPEN));
- }
-
- actionBars.updateActionBars();
- }
-
- /* @inheritDoc */
- @Override
- public void fillContextMenu(IMenuManager menu) {
- /*
- * Actions on multiple selections
- */
- menu.appendToGroup(ICommonMenuConstants.GROUP_EDIT, new DFSAction(
- DFSActions.DELETE));
-
- menu.appendToGroup(ICommonMenuConstants.GROUP_OPEN, new DFSAction(
- DFSActions.REFRESH));
-
- menu.appendToGroup(ICommonMenuConstants.GROUP_NEW, new DFSAction(
- DFSActions.DOWNLOAD));
-
- if (site == null)
- return;
-
- ISelection isel = site.getStructuredViewer().getSelection();
- if (!(isel instanceof IStructuredSelection))
- return;
-
- /*
- * Actions on single selections only
- */
-
- IStructuredSelection issel = (IStructuredSelection) isel;
- if (issel.size() != 1)
- return;
- Object element = issel.getFirstElement();
-
- if (element instanceof DFSFile) {
- menu.appendToGroup(ICommonMenuConstants.GROUP_OPEN, new DFSAction(
- DFSActions.OPEN));
-
- } else if (element instanceof DFSFolder) {
- menu.appendToGroup(ICommonMenuConstants.GROUP_NEW, new DFSAction(
- DFSActions.MKDIR));
- menu.appendToGroup(ICommonMenuConstants.GROUP_NEW, new DFSAction(
- DFSActions.UPLOAD_FILES));
- menu.appendToGroup(ICommonMenuConstants.GROUP_NEW, new DFSAction(
- DFSActions.UPLOAD_DIR));
-
- } else if (element instanceof DFSLocation) {
- menu.appendToGroup(ICommonMenuConstants.GROUP_OPEN, new DFSAction(
- DFSActions.RECONNECT));
-
- } else if (element instanceof DFSLocationsRoot) {
- menu.appendToGroup(ICommonMenuConstants.GROUP_OPEN, new DFSAction(
- DFSActions.DISCONNECT));
- }
-
- }
-
- /**
- * Representation of an action on a DFS entry in the browser
- */
- public static class DFSAction extends Action {
-
- private final String id;
-
- private final String title;
-
- private DFSActions action;
-
- public DFSAction(String id, String title) {
- this.id = id;
- this.title = title;
- }
-
- public DFSAction(DFSActions action) {
- this.id = action.id;
- this.title = action.title;
- }
-
- /* @inheritDoc */
- @Override
- public String getText() {
- return this.title;
- }
-
- /* @inheritDoc */
- @Override
- public ImageDescriptor getImageDescriptor() {
- return ImageLibrary.get(getActionDefinitionId());
- }
-
- /* @inheritDoc */
- @Override
- public String getActionDefinitionId() {
- return id;
- }
-
- /* @inheritDoc */
- @Override
- public void run() {
- DFSActionImpl action = new DFSActionImpl();
- action.setActivePart(this, PlatformUI.getWorkbench()
- .getActiveWorkbenchWindow().getActivePage().getActivePart());
- action.selectionChanged(this, site.getStructuredViewer()
- .getSelection());
- action.run(this);
- }
-
- /* @inheritDoc */
- @Override
- public boolean isEnabled() {
- return true;
- }
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSActions.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSActions.java
deleted file mode 100644
index 038497ae893..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSActions.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-public enum DFSActions {
-
- DELETE("Delete"), REFRESH("Refresh"), DOWNLOAD("Download from DFS..."), OPEN(
- "View"), MKDIR("Create new directory..."), UPLOAD_FILES(
- "Upload files to DFS..."), UPLOAD_DIR("Upload directory to DFS..."), RECONNECT(
- "Reconnect"), DISCONNECT("Disconnect");
-
- final String title;
-
- final String id;
-
- private static final String PREFIX = "dfs.browser.action.";
-
- public static DFSActions getById(String def) {
- if (!def.startsWith(PREFIX))
- return null;
- return valueOf(def.substring(PREFIX.length()).toUpperCase());
- }
-
- DFSActions(String title) {
- this.title = title;
- this.id = PREFIX + this.name().toLowerCase();
- }
-}
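
Each DFSActions constant pairs a display title with an id of the form dfs.browser.action.<constant name in lower case>, and getById() maps such an id back to its constant; ActionProvider.DFSAction above uses that id both as the action definition id and as the ImageLibrary key. Below is a small self-contained sketch of the same round trip, using a hypothetical trimmed-down enum rather than the deleted class.

// Hypothetical, trimmed re-statement of the DFSActions id scheme, only to
// show the constant <-> id round trip that ActionProvider relies on.
enum BrowserAction {
    DELETE("Delete"), REFRESH("Refresh"), OPEN("View");

    static final String PREFIX = "dfs.browser.action.";    // compile-time constant, usable in the constructor
    final String title;
    final String id;

    BrowserAction(String title) {
        this.title = title;
        this.id = PREFIX + name().toLowerCase();            // e.g. dfs.browser.action.delete
    }

    static BrowserAction getById(String def) {
        if (!def.startsWith(PREFIX)) {
            return null;                                    // not a DFS browser action id
        }
        return valueOf(def.substring(PREFIX.length()).toUpperCase());
    }
}

public class BrowserActionDemo {
    public static void main(String[] args) {
        System.out.println(BrowserAction.DELETE.id);                          // dfs.browser.action.delete
        System.out.println(BrowserAction.getById("dfs.browser.action.open")); // OPEN
        System.out.println(BrowserAction.getById("unrelated.id"));            // null
    }
}
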
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContent.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContent.java
deleted file mode 100644
index bea94d53697..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContent.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-/**
- * Interface to define content entities in the DFS browser
- */
-public interface DFSContent {
-
- boolean hasChildren();
-
- DFSContent[] getChildren();
-
- void refresh();
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContentProvider.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContentProvider.java
deleted file mode 100644
index fca7d46916b..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContentProvider.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.eclipse.ImageLibrary;
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.apache.hadoop.eclipse.servers.ServerRegistry;
-import org.eclipse.jface.viewers.ILabelProvider;
-import org.eclipse.jface.viewers.ILabelProviderListener;
-import org.eclipse.jface.viewers.ITreeContentProvider;
-import org.eclipse.jface.viewers.StructuredViewer;
-import org.eclipse.jface.viewers.Viewer;
-import org.eclipse.swt.graphics.Image;
-import org.eclipse.swt.widgets.Display;
-
-/**
- * Handles viewing of DFS locations
- *
- *
- * The content handled by this provider is a tree:
- *
- *   DFSLocationsRoot
- *   \_ HadoopServer
- *   |  \_ DfsFolder
- *   |  |  \_ DfsFile
- *   |  \_ DfsFolder
- *   |  ...
- *   \_ HadoopServer...
- *
- * The code should not block here: blocking operations need to be done
- * asynchronously so as not to freeze the UI!
- */
-public class DFSContentProvider implements ITreeContentProvider,
- ILabelProvider {
-
- /**
- * The viewer that displays this Tree content
- */
- private Viewer viewer;
-
- private StructuredViewer sviewer;
-
- private Map<HadoopServer, DFSFolder> rootFolders =
- new HashMap<HadoopServer, DFSFolder>();
-
- /**
- * Constructor: load resources (icons).
- */
- public DFSContentProvider() {
- }
-
- private final DFSLocationsRoot locationsRoot = new DFSLocationsRoot(this);
-
- /*
- * ITreeContentProvider implementation
- */
-
- /* @inheritDoc */
- public Object[] getChildren(Object parent) {
-
- if (!(parent instanceof DFSContent))
- return null;
- DFSContent content = (DFSContent) parent;
- return content.getChildren();
- }
-
- public Object[] test(Object parentElement) {
- if (parentElement instanceof DFSLocationsRoot) {
- return ServerRegistry.getInstance().getServers().toArray();
-
- } else if (parentElement instanceof HadoopServer) {
- final HadoopServer location = (HadoopServer) parentElement;
- Object root = rootFolders.get(location);
- if (root != null)
- return new Object[] { root };
-
- return new Object[] { "Connecting to DFS..." };
-
- } else if (parentElement instanceof DFSFolder) {
- DFSFolder folder = (DFSFolder) parentElement;
- return folder.getChildren();
- }
-
- return new Object[] { "" };
- }
-
- /* @inheritDoc */
- public Object getParent(Object element) {
-
- if (element instanceof DFSPath) {
- return ((DFSPath) element).getParent();
-
- } else if (element instanceof HadoopServer) {
- return locationsRoot;
- }
-
- return null;
- }
-
- /* @inheritDoc */
- public boolean hasChildren(Object element) {
- if (element instanceof DFSContent) {
- DFSContent content = (DFSContent) element;
- return content.hasChildren();
- }
- return false;
- }
-
- /*
- * IStructureContentProvider implementation
- */
-
- /* @inheritDoc */
- public Object[] getElements(final Object inputElement) {
- return new Object[] { locationsRoot };
- // return ServerRegistry.getInstance().getServers().toArray();
- }
-
- /*
- * ILabelProvider implementation
- */
-
- /* @inheritDoc */
- public Image getImage(Object element) {
- if (element instanceof DFSLocationsRoot)
- return ImageLibrary.getImage("dfs.browser.root.entry");
-
- else if (element instanceof DFSLocation)
- return ImageLibrary.getImage("dfs.browser.location.entry");
-
- else if (element instanceof DFSFolder)
- return ImageLibrary.getImage("dfs.browser.folder.entry");
-
- else if (element instanceof DFSFile)
- return ImageLibrary.getImage("dfs.browser.file.entry");
-
- return null;
- }
-
- /* @inheritDoc */
- public String getText(Object element) {
- if (element instanceof DFSFile)
- return ((DFSFile) element).toDetailedString();
-
- return element.toString();
- }
-
- /*
- * IBaseLabelProvider implementation
- */
-
- /* @inheritDoc */
- public void addListener(ILabelProviderListener listener) {
- }
-
- /* @inheritDoc */
- public void removeListener(ILabelProviderListener listener) {
- }
-
- /* @inheritDoc */
- public boolean isLabelProperty(Object element, String property) {
- return false;
- }
-
- /*
- * IContentProvider implementation
- */
-
- /* @inheritDoc */
- public void dispose() {
- }
-
- /* @inheritDoc */
- public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
- this.viewer = viewer;
- if ((viewer != null) && (viewer instanceof StructuredViewer))
- this.sviewer = (StructuredViewer) viewer;
- else
- this.sviewer = null;
- }
-
- /*
- * Miscellaneous
- */
-
- /**
- * Ask the viewer for this content to refresh
- */
- void refresh() {
- // no display, nothing to update
- if (this.viewer == null)
- return;
-
- Display.getDefault().asyncExec(new Runnable() {
- public void run() {
- DFSContentProvider.this.viewer.refresh();
- }
- });
- }
-
- /**
- * Ask the viewer to refresh a single element
- *
- * @param content what to refresh
- */
- void refresh(final DFSContent content) {
- if (this.sviewer != null) {
- Display.getDefault().asyncExec(new Runnable() {
- public void run() {
- DFSContentProvider.this.sviewer.refresh(content);
- }
- });
-
- } else {
- refresh();
- }
- }
-
- Viewer getViewer() {
- return this.viewer;
- }
-
-}
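
The class comment above insists that getChildren() must never block on the NameNode. DFSFolder and DFSLocation (below) implement that by returning a DFSMessage placeholder, loading the real children in a background Job, and then calling provider.refresh(...), which re-queries the node on the UI thread via Display.asyncExec(). A plain-Java sketch of that placeholder-then-refresh pattern follows, with a Thread standing in for the Eclipse Job and a Runnable standing in for the viewer refresh; all names here are illustrative.

import java.util.concurrent.CountDownLatch;

// Plain-Java sketch (no Eclipse APIs) of the non-blocking pattern described in
// the provider's class comment: answer getChildren() immediately with a
// placeholder, load the real children on a worker, then ask the viewer to
// re-query the node.
public class LazyChildrenDemo {

    interface Content {
        Object[] getChildren();
    }

    static class Folder implements Content {
        private volatile Object[] children;       // null until the background load finishes
        private final Runnable refreshViewer;     // stand-in for provider.refresh(this)

        Folder(Runnable refreshViewer) {
            this.refreshViewer = refreshViewer;
        }

        public Object[] getChildren() {
            if (children == null) {
                new Thread(() -> {                // stand-in for an Eclipse Job
                    children = new Object[] { "part-00000", "part-00001" }; // pretend DFS listing
                    refreshViewer.run();          // tell the UI to ask again
                }).start();
                return new Object[] { "Listing folder content..." };        // placeholder, returned at once
            }
            return children;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch refreshed = new CountDownLatch(1);
        Folder folder = new Folder(refreshed::countDown);
        System.out.println(folder.getChildren()[0]);      // placeholder message
        refreshed.await();                                // the viewer refresh would happen here
        System.out.println(folder.getChildren().length);  // real children on the second query: 2
    }
}
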
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFile.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFile.java
deleted file mode 100644
index af8e6c183b1..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFile.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.reflect.InvocationTargetException;
-
-import org.apache.hadoop.eclipse.Activator;
-import org.apache.hadoop.eclipse.ErrorMessageDialog;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.eclipse.core.resources.IStorage;
-import org.eclipse.core.runtime.CoreException;
-import org.eclipse.core.runtime.IPath;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.PlatformObject;
-import org.eclipse.core.runtime.Status;
-import org.eclipse.jface.dialogs.MessageDialog;
-import org.eclipse.jface.operation.IRunnableWithProgress;
-import org.eclipse.ui.PlatformUI;
-
-/**
- * File handling methods for the DFS
- */
-public class DFSFile extends DFSPath implements DFSContent {
-
- protected long length;
-
- protected short replication;
-
- /**
- * Constructor to upload a file on the distributed file system
- *
- * @param parent
- * @param path
- * @param file
- * @param monitor
- */
- public DFSFile(DFSPath parent, Path path, File file,
- IProgressMonitor monitor) {
-
- super(parent, path);
- this.upload(monitor, file);
- }
-
- public DFSFile(DFSPath parent, Path path) {
- super(parent, path);
-
- try {
- FileStatus fs = getDFS().getFileStatus(path);
- this.length = fs.getLen();
- this.replication = fs.getReplication();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
- /**
- * Download and view contents of a file
- *
- * @return a InputStream for the file
- */
- public InputStream open() throws IOException {
-
- return getDFS().open(this.path);
- }
-
- /**
- * Download this file to the local file system. This creates a download
- * status monitor.
- *
- * @param file
- * @throws JSchException
- * @throws IOException
- * @throws InvocationTargetException
- * @throws InterruptedException
- *
- * @deprecated
- */
- public void downloadToLocalFile(final File file)
- throws InvocationTargetException, InterruptedException {
-
- PlatformUI.getWorkbench().getProgressService().busyCursorWhile(
- new IRunnableWithProgress() {
- public void run(IProgressMonitor monitor)
- throws InvocationTargetException {
-
- DFSFile.this.downloadToLocalFile(monitor, file);
- }
- });
- }
-
- /* @inheritDoc */
- @Override
- public void downloadToLocalDirectory(IProgressMonitor monitor, File dir) {
-
- File dfsPath = new File(this.getPath().toString());
- File destination = new File(dir, dfsPath.getName());
-
- if (destination.exists()) {
- boolean answer =
- MessageDialog.openQuestion(null, "Overwrite existing local file?",
- "The file you are attempting to download from the DFS "
- + this.getPath()
- + ", already exists in your local directory as "
- + destination + ".\n" + "Overwrite the existing file?");
- if (!answer)
- return;
- }
-
- try {
- this.downloadToLocalFile(monitor, destination);
-
- } catch (Exception e) {
- e.printStackTrace();
- MessageDialog.openWarning(null, "Download to local file system",
- "Downloading of file \"" + this.path + "\" to local directory \""
- + dir + "\" has failed.\n" + e);
- }
- }
-
- /**
- * Provides a detailed string for this file
- *
- * @return the string formatted as
- * <filename> (<size>, r<replication>)
- */
- public String toDetailedString() {
- final String[] units = { "b", "Kb", "Mb", "Gb", "Tb" };
- int unit = 0;
- double l = this.length;
- while ((l >= 1024.0) && (unit < units.length)) {
- unit += 1;
- l /= 1024.0;
- }
-
- return String.format("%s (%.1f %s, r%d)", super.toString(), l,
- units[unit], this.replication);
- }
-
- /* @inheritDoc */
- @Override
- public String toString() {
- return this.path.toString();
- }
-
- /*
- *
- */
-
- /**
- * Download the DfsFile to a local file. Use the given monitor to report
- * status of operation.
- *
- * @param monitor the status monitor
- * @param file the local file where to put the downloaded file
- * @throws InvocationTargetException
- */
- public void downloadToLocalFile(IProgressMonitor monitor, File file)
- throws InvocationTargetException {
-
- final int taskSize = 1024;
-
- monitor.setTaskName("Download file " + this.path);
-
- BufferedOutputStream ostream = null;
- DataInputStream istream = null;
-
- try {
- istream = getDFS().open(this.path);
- ostream = new BufferedOutputStream(new FileOutputStream(file));
-
- int bytes;
- byte[] buffer = new byte[taskSize];
-
- while ((bytes = istream.read(buffer)) >= 0) {
- if (monitor.isCanceled())
- return;
- ostream.write(buffer, 0, bytes);
- monitor.worked(1);
- }
-
- } catch (Exception e) {
- throw new InvocationTargetException(e);
-
- } finally {
- // Clean all opened resources
- if (istream != null) {
- try {
- istream.close();
- } catch (IOException e) {
- e.printStackTrace();
- // nothing we can do here
- }
- }
- try {
- ostream.close();
- } catch (IOException e) {
- e.printStackTrace();
- // nothing we can do here
- }
- }
- }
-
- /**
- * Upload a local file to this file on the distributed file system
- *
- * @param monitor
- * @param file
- */
- public void upload(IProgressMonitor monitor, File file) {
-
- final int taskSize = 1024;
-
- monitor.setTaskName("Upload file " + this.path);
-
- BufferedInputStream istream = null;
- DataOutputStream ostream = null;
-
- try {
- istream = new BufferedInputStream(new FileInputStream(file));
- ostream = getDFS().create(this.path);
-
- int bytes;
- byte[] buffer = new byte[taskSize];
-
- while ((bytes = istream.read(buffer)) >= 0) {
- if (monitor.isCanceled())
- return;
- ostream.write(buffer, 0, bytes);
- monitor.worked(1);
- }
-
- } catch (Exception e) {
- ErrorMessageDialog.display(String.format(
- "Unable to uploade file %s to %s", file, this.path), e
- .getLocalizedMessage());
-
- } finally {
- try {
- if (istream != null)
- istream.close();
- } catch (IOException e) {
- e.printStackTrace();
- // nothing we can do here
- }
- try {
- if (ostream != null)
- ostream.close();
- } catch (IOException e) {
- e.printStackTrace();
- // nothing we can do here
- }
- }
- }
-
- /* @inheritDoc */
- @Override
- public void refresh() {
- getParent().refresh();
- }
-
- /* @inheritDoc */
- @Override
- public int computeDownloadWork() {
- return 1 + (int) (this.length / 1024);
- }
-
- /**
- * Creates an adapter for the file to open it in the Editor
- *
- * @return the IStorage
- */
- public IStorage getIStorage() {
- return new IStorageAdapter();
- }
-
- /**
- * IStorage adapter to open the file in the Editor
- */
- private class IStorageAdapter extends PlatformObject implements IStorage {
-
- /* @inheritDoc */
- public InputStream getContents() throws CoreException {
- try {
- return DFSFile.this.open();
-
- } catch (IOException ioe) {
- throw new CoreException(new Status(Status.ERROR,
- Activator.PLUGIN_ID, 0, "Unable to open file \""
- + DFSFile.this.path + "\"", ioe));
- }
- }
-
- /* @inheritDoc */
- public IPath getFullPath() {
- return new org.eclipse.core.runtime.Path(DFSFile.this.path.toString());
- }
-
- /* @inheritDoc */
- public String getName() {
- return DFSFile.this.path.getName();
- }
-
- /* @inheritDoc */
- public boolean isReadOnly() {
- return true;
- }
-
- }
-
- /*
- * Implementation of DFSContent
- */
-
- /* @inheritDoc */
- public DFSContent[] getChildren() {
- return null;
- }
-
- /* @inheritDoc */
- public boolean hasChildren() {
- return false;
- }
-
-}
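
toDetailedString() above renders a file as <name> (<size>, r<replication>), repeatedly dividing the byte count by 1024 until it fits a unit label. A standalone sketch of the same formatting follows; it bounds the loop at units.length - 1, which keeps the array index in range even in the unlikely case of a path larger than 1024 Tb, where the loop above would step one unit past the table. Method and class names are illustrative.

// Standalone sketch of the human-readable size formatting used by
// DFSFile.toDetailedString().
public class SizeFormatDemo {

    static String detail(String name, long length, short replication) {
        final String[] units = { "b", "Kb", "Mb", "Gb", "Tb" };
        int unit = 0;
        double l = length;
        // stop at the last unit so units[unit] can never go out of bounds
        while (l >= 1024.0 && unit < units.length - 1) {
            unit++;
            l /= 1024.0;
        }
        return String.format("%s (%.1f %s, r%d)", name, l, units[unit], replication);
    }

    public static void main(String[] args) {
        // 1.5 MiB file with replication factor 3
        System.out.println(detail("/user/alice/part-00000", 1572864L, (short) 3));
        // prints: /user/alice/part-00000 (1.5 Mb, r3)
    }
}
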
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFolder.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFolder.java
deleted file mode 100644
index 7dc72a7bf3b..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFolder.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.logging.Logger;
-
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.IStatus;
-import org.eclipse.core.runtime.Status;
-import org.eclipse.core.runtime.jobs.Job;
-import org.eclipse.jface.dialogs.MessageDialog;
-
-/**
- * Local representation of a folder in the DFS.
- *
- * The constructor creates an empty representation of the folder and spawns a
- * thread that will fill it in asynchronously.
- */
-public class DFSFolder extends DFSPath implements DFSContent {
-
- static Logger log = Logger.getLogger(DFSFolder.class.getName());
-
- private DFSContent[] children;
-
- protected DFSFolder(DFSContentProvider provider, HadoopServer location)
- throws IOException {
-
- super(provider, location);
- }
-
- private DFSFolder(DFSPath parent, Path path) {
- super(parent, path);
- }
-
- protected void loadDFSFolderChildren() throws IOException {
- List<DFSContent> list = new ArrayList<DFSContent>();
-
- for (FileStatus status : getDFS().listStatus(this.getPath())) {
- if (status.isDir()) {
- list.add(new DFSFolder(this, status.getPath()));
- } else {
- list.add(new DFSFile(this, status.getPath()));
- }
- }
-
- this.children = list.toArray(new DFSContent[list.size()]);
- }
-
- /**
- * Upload the given file or directory into this DfsFolder
- *
- * @param file
- * @throws IOException
- */
- public void upload(IProgressMonitor monitor, final File file)
- throws IOException {
-
- if (file.isDirectory()) {
- Path filePath = new Path(this.path, file.getName());
- getDFS().mkdirs(filePath);
- DFSFolder newFolder = new DFSFolder(this, filePath);
- monitor.worked(1);
- for (File child : file.listFiles()) {
- if (monitor.isCanceled())
- return;
- newFolder.upload(monitor, child);
- }
-
- } else if (file.isFile()) {
- Path filePath = new Path(this.path, file.getName());
- DFSFile newFile = new DFSFile(this, filePath, file, monitor);
-
- } else {
- // XXX don't know what the file is?
- }
- }
-
- /* @inheritDoc */
- @Override
- public void downloadToLocalDirectory(IProgressMonitor monitor, File dir) {
- if (!dir.exists())
- dir.mkdirs();
-
- if (!dir.isDirectory()) {
- MessageDialog.openError(null, "Download to local file system",
- "Invalid directory location: \"" + dir + "\"");
- return;
- }
-
- File dfsPath = new File(this.getPath().toString());
- File destination = new File(dir, dfsPath.getName());
-
- if (!destination.exists()) {
- if (!destination.mkdir()) {
- MessageDialog.openError(null, "Download to local directory",
- "Unable to create directory " + destination.getAbsolutePath());
- return;
- }
- }
-
- // Download all DfsPath children
- for (Object childObj : getChildren()) {
- if (childObj instanceof DFSPath) {
- ((DFSPath) childObj).downloadToLocalDirectory(monitor, destination);
- monitor.worked(1);
- }
- }
- }
-
- /* @inheritDoc */
- @Override
- public int computeDownloadWork() {
- int work = 1;
- for (DFSContent child : getChildren()) {
- if (child instanceof DFSPath)
- work += ((DFSPath) child).computeDownloadWork();
- }
-
- return work;
- }
-
- /**
- * Create a new sub directory into this directory
- *
- * @param folderName
- */
- public void mkdir(String folderName) {
- try {
- getDFS().mkdirs(new Path(this.path, folderName));
- } catch (IOException ioe) {
- ioe.printStackTrace();
- }
- doRefresh();
- }
-
- /*
- * Implementation of DFSContent
- */
-
- /* @inheritDoc */
- public boolean hasChildren() {
- if (this.children == null)
- return true;
- else
- return (this.children.length > 0);
- }
-
- /* @inheritDoc */
- public DFSContent[] getChildren() {
- if (children == null) {
- new Job("Connecting to DFS " + location) {
- @Override
- protected IStatus run(IProgressMonitor monitor) {
- try {
- loadDFSFolderChildren();
- return Status.OK_STATUS;
-
- } catch (IOException ioe) {
- children =
- new DFSContent[] { new DFSMessage("Error: "
- + ioe.getLocalizedMessage()) };
- return Status.CANCEL_STATUS;
-
- } finally {
- // Under all circumstances, update the UI
- provider.refresh(DFSFolder.this);
- }
- }
- }.schedule();
-
- return new DFSContent[] { new DFSMessage("Listing folder content...") };
- }
- return this.children;
- }
-
- /* @inheritDoc */
- @Override
- public void refresh() {
- this.children = null;
- this.doRefresh();
- }
-
- /* @inheritDoc */
- @Override
- public String toString() {
- return String.format("%s (%s)", super.toString(),
- this.getChildren().length);
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocation.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocation.java
deleted file mode 100644
index 31c8fb30e15..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocation.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-import java.io.IOException;
-
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.IStatus;
-import org.eclipse.core.runtime.Status;
-import org.eclipse.core.runtime.jobs.Job;
-
-/**
- * DFS Content representation of a HDFS location
- */
-public class DFSLocation implements DFSContent {
-
- private final DFSContentProvider provider;
-
- private final HadoopServer location;
-
- private DFSContent rootFolder = null;
-
- DFSLocation(DFSContentProvider provider, HadoopServer server) {
- this.provider = provider;
- this.location = server;
- }
-
- /* @inheritDoc */
- @Override
- public String toString() {
- return this.location.getLocationName();
- }
-
- /*
- * Implementation of DFSContent
- */
-
- /* @inheritDoc */
- public DFSContent[] getChildren() {
- if (this.rootFolder == null) {
- /*
- * DfsFolder constructor might block as it contacts the NameNode: work
- * asynchronously here or this will potentially freeze the UI
- */
- new Job("Connecting to DFS " + location) {
- @Override
- protected IStatus run(IProgressMonitor monitor) {
- try {
- rootFolder = new DFSFolder(provider, location);
- return Status.OK_STATUS;
-
- } catch (IOException ioe) {
- rootFolder =
- new DFSMessage("Error: " + ioe.getLocalizedMessage());
- return Status.CANCEL_STATUS;
-
- } finally {
- // Under all circumstances, update the UI
- provider.refresh(DFSLocation.this);
- }
- }
- }.schedule();
-
- return new DFSContent[] { new DFSMessage("Connecting to DFS "
- + toString()) };
- }
- return new DFSContent[] { this.rootFolder };
- }
-
- /* @inheritDoc */
- public boolean hasChildren() {
- return true;
- }
-
- /* @inheritDoc */
- public void refresh() {
- this.rootFolder = null;
- this.provider.refresh(this);
- }
-
- /*
- * Actions
- */
-
- /**
- * Refresh the location using a new connection
- */
- public void reconnect() {
- this.refresh();
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocationsRoot.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocationsRoot.java
deleted file mode 100644
index 9d9a60909eb..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocationsRoot.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.apache.hadoop.eclipse.servers.IHadoopServerListener;
-import org.apache.hadoop.eclipse.servers.ServerRegistry;
-import org.apache.hadoop.fs.FileSystem;
-
-/**
- * Representation of the root element containing all DFS servers. This
- * content registers an observer on Hadoop servers so as to update itself
- * when servers are updated.
- */
-public class DFSLocationsRoot implements DFSContent, IHadoopServerListener {
-
- /**
- *
- */
- private final DFSContentProvider provider;
-
- private Map<HadoopServer, DFSLocation> map =
- new HashMap<HadoopServer, DFSLocation>();
-
- /**
- * Register a listeners to track DFS locations updates
- *
- * @param provider the content provider this content is the root of
- */
- DFSLocationsRoot(DFSContentProvider provider) {
- this.provider = provider;
- ServerRegistry.getInstance().addListener(this);
- this.refresh();
- }
-
- /*
- * Implementation of IHadoopServerListener
- */
-
- /* @inheritDoc */
- public synchronized void serverChanged(final HadoopServer location,
- final int type) {
-
- switch (type) {
- case ServerRegistry.SERVER_STATE_CHANGED: {
- this.provider.refresh(map.get(location));
- break;
- }
-
- case ServerRegistry.SERVER_ADDED: {
- DFSLocation dfsLoc = new DFSLocation(provider, location);
- map.put(location, dfsLoc);
- this.provider.refresh(this);
- break;
- }
-
- case ServerRegistry.SERVER_REMOVED: {
- map.remove(location);
- this.provider.refresh(this);
- break;
- }
- }
- }
-
- /**
- * Recompute the map of Hadoop locations
- */
- private synchronized void reloadLocations() {
- map.clear();
- for (HadoopServer location : ServerRegistry.getInstance().getServers())
- map.put(location, new DFSLocation(provider, location));
- }
-
- /* @inheritDoc */
- @Override
- public String toString() {
- return "DFS Locations";
- }
-
- /*
- * Implementation of DFSContent
- */
-
- /* @inheritDoc */
- public synchronized DFSContent[] getChildren() {
- return this.map.values().toArray(new DFSContent[this.map.size()]);
- }
-
- /* @inheritDoc */
- public boolean hasChildren() {
- return (this.map.size() > 0);
- }
-
- /* @inheritDoc */
- public void refresh() {
- reloadLocations();
- this.provider.refresh(this);
- }
-
- /*
- * Actions
- */
-
- public void disconnect() {
- Thread closeThread = new Thread() {
- /* @inheritDoc */
- @Override
- public void run() {
- try {
- System.out.printf("Closing all opened File Systems...\n");
- FileSystem.closeAll();
- System.out.printf("File Systems closed\n");
-
- } catch (IOException ioe) {
- ioe.printStackTrace();
- }
- }
- };
-
- // Wait 5 seconds for the connections to be closed
- closeThread.start();
- try {
- closeThread.join(5000);
-
- } catch (InterruptedException ie) {
- // Ignore
- }
- }
-
-}
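
DFSLocationsRoot keeps its HadoopServer-to-DFSLocation map in step with the workspace by listening to ServerRegistry events: a state change refreshes only the affected location's node, while add and remove update the map and refresh the root. Below is a plain-Java sketch of that dispatch, with strings standing in for the server and location types and a small functional interface standing in for the content provider; names and event codes are invented stand-ins.

import java.util.LinkedHashMap;
import java.util.Map;

// Plain-Java sketch of the listener dispatch in DFSLocationsRoot.serverChanged():
// refresh one node on a state change, refresh the whole root when the set of
// locations changes.
public class LocationsRootDemo {

    static final int STATE_CHANGED = 0, ADDED = 1, REMOVED = 2;

    interface Refreshable {
        void refresh(Object node);
    }

    static class LocationsRoot {
        private final Map<String, String> nodes = new LinkedHashMap<>(); // server -> tree node
        private final Refreshable viewer;

        LocationsRoot(Refreshable viewer) {
            this.viewer = viewer;
        }

        synchronized void serverChanged(String server, int type) {
            switch (type) {
            case STATE_CHANGED:
                viewer.refresh(nodes.get(server));   // repaint just that location's subtree
                break;
            case ADDED:
                nodes.put(server, "node:" + server);
                viewer.refresh(this);                // the root gained a child
                break;
            case REMOVED:
                nodes.remove(server);
                viewer.refresh(this);                // the root lost a child
                break;
            }
        }

        @Override
        public String toString() {
            return "<root>";
        }
    }

    public static void main(String[] args) {
        LocationsRoot root = new LocationsRoot(node -> System.out.println("refresh " + node));
        root.serverChanged("cluster-a", ADDED);          // refresh <root>
        root.serverChanged("cluster-a", STATE_CHANGED);  // refresh node:cluster-a
        root.serverChanged("cluster-a", REMOVED);        // refresh <root>
    }
}
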
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSMessage.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSMessage.java
deleted file mode 100644
index ce83b9aa260..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSMessage.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-/**
- * DFS Content that displays a message.
- */
-class DFSMessage implements DFSContent {
-
- private String message;
-
- DFSMessage(String message) {
- this.message = message;
- }
-
- /* @inheritDoc */
- @Override
- public String toString() {
- return this.message;
- }
-
- /*
- * Implementation of DFSContent
- */
-
- /* @inheritDoc */
- public DFSContent[] getChildren() {
- return null;
- }
-
- /* @inheritDoc */
- public boolean hasChildren() {
- return false;
- }
-
- /* @inheritDoc */
- public void refresh() {
- // Nothing to do
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSPath.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSPath.java
deleted file mode 100644
index 0abd53815f7..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSPath.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.dfs;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.logging.Logger;
-
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.eclipse.ErrorMessageDialog;
-import org.apache.hadoop.eclipse.server.ConfProp;
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.jface.dialogs.MessageDialog;
-
-/**
- * DFS Path handling for DFS
- */
-public abstract class DFSPath implements DFSContent {
-
- protected final DFSContentProvider provider;
-
- protected HadoopServer location;
-
- private DistributedFileSystem dfs = null;
-
- protected final Path path;
-
- protected final DFSPath parent;
-
- /**
- * For debugging purpose
- */
- static Logger log = Logger.getLogger(DFSPath.class.getName());
-
- /**
- * Create a path representation for the given location in the given viewer
- *
- * @param location
- * @param path
- * @param viewer
- */
- public DFSPath(DFSContentProvider provider, HadoopServer location)
- throws IOException {
-
- this.provider = provider;
- this.location = location;
- this.path = new Path("/");
- this.parent = null;
- }
-
- /**
- * Create a sub-path representation for the given parent path
- *
- * @param parent
- * @param path
- */
- protected DFSPath(DFSPath parent, Path path) {
- this.provider = parent.provider;
- this.location = parent.location;
- this.dfs = parent.dfs;
- this.parent = parent;
- this.path = path;
- }
-
- protected void dispose() {
- // Free the DFS connection
- }
-
- /* @inheritDoc */
- @Override
- public String toString() {
- if (path.equals("/")) {
- return location.getConfProp(ConfProp.FS_DEFAULT_URI);
-
- } else {
- return this.path.getName();
- }
- }
-
- /**
- * Does a recursive delete of the remote directory tree at this node.
- */
- public void delete() {
- try {
- getDFS().delete(this.path, true);
-
- } catch (IOException e) {
- e.printStackTrace();
- MessageDialog.openWarning(null, "Delete file",
- "Unable to delete file \"" + this.path + "\"\n" + e);
- }
- }
-
- public DFSPath getParent() {
- return parent;
- }
-
- public abstract void refresh();
-
- /**
- * Refresh the UI element for this content
- */
- public void doRefresh() {
- provider.refresh(this);
- }
-
- /**
- * Copy the DfsPath to the given local directory
- *
- * @param directory the local directory
- */
- public abstract void downloadToLocalDirectory(IProgressMonitor monitor,
- File dir);
-
- public Path getPath() {
- return this.path;
- }
-
- /**
- * Gets a connection to the DFS
- *
- * @return a connection to the DFS
- * @throws IOException
- */
- DistributedFileSystem getDFS() throws IOException {
- if (this.dfs == null) {
- FileSystem fs = location.getDFS();
- if (!(fs instanceof DistributedFileSystem)) {
- ErrorMessageDialog.display("DFS Browser",
- "The DFS Browser cannot browse anything else "
- + "but a Distributed File System!");
- throw new IOException("DFS Browser expects a DistributedFileSystem!");
- }
- this.dfs = (DistributedFileSystem) fs;
- }
- return this.dfs;
- }
-
- public abstract int computeDownloadWork();
-
-}
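
getDFS() above resolves the location's file system once, rejects anything that is not a DistributedFileSystem, and caches the downcast handle; child paths inherit the cached field through the DFSPath(DFSPath, Path) constructor. A dependency-free sketch of that lazy, type-checked caching follows, with invented handle types standing in for the Hadoop FileSystem classes.

import java.io.IOException;

// Dependency-free sketch of the lazy, type-checked handle caching in getDFS().
// The handle types below are invented stand-ins, not Hadoop classes.
public class LazyHandleDemo {

    interface FileSystemHandle {}
    static class DistributedHandle implements FileSystemHandle {}
    static class LocalHandle implements FileSystemHandle {}

    static class PathNode {
        private final FileSystemHandle resolved;   // what the location hands back
        private DistributedHandle dfs;             // cached after the first successful check

        PathNode(FileSystemHandle resolved) {
            this.resolved = resolved;
        }

        DistributedHandle getDFS() throws IOException {
            if (dfs == null) {
                if (!(resolved instanceof DistributedHandle)) {
                    // the DFS browser only knows how to talk to a distributed FS
                    throw new IOException("DFS Browser expects a DistributedFileSystem!");
                }
                dfs = (DistributedHandle) resolved;
            }
            return dfs;
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(new PathNode(new DistributedHandle()).getDFS() != null); // true
        try {
            new PathNode(new LocalHandle()).getDFS();
        } catch (IOException expected) {
            System.out.println(expected.getMessage());                              // rejected
        }
    }
}
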
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java
deleted file mode 100644
index 1678e0d1b63..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.launch;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.logging.Logger;
-
-import org.apache.hadoop.eclipse.servers.RunOnHadoopWizard;
-import org.eclipse.core.resources.IFile;
-import org.eclipse.core.resources.IResource;
-import org.eclipse.core.runtime.CoreException;
-import org.eclipse.debug.core.ILaunchConfiguration;
-import org.eclipse.debug.core.ILaunchConfigurationType;
-import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
-import org.eclipse.jdt.core.IJavaProject;
-import org.eclipse.jdt.core.IType;
-import org.eclipse.jdt.core.JavaCore;
-import org.eclipse.jdt.debug.ui.launchConfigurations.JavaApplicationLaunchShortcut;
-import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
-import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
-import org.eclipse.jdt.launching.JavaRuntime;
-import org.eclipse.jface.wizard.IWizard;
-import org.eclipse.jface.wizard.WizardDialog;
-import org.eclipse.swt.widgets.Display;
-import org.eclipse.swt.widgets.Shell;
-
-/**
- * Add a shortcut "Run on Hadoop" to the Run menu
- */
-
-public class HadoopApplicationLaunchShortcut extends
- JavaApplicationLaunchShortcut {
-
- static Logger log =
- Logger.getLogger(HadoopApplicationLaunchShortcut.class.getName());
-
- // private ActionDelegate delegate = new RunOnHadoopActionDelegate();
-
- public HadoopApplicationLaunchShortcut() {
- }
-
- /* @inheritDoc */
- @Override
- protected ILaunchConfiguration findLaunchConfiguration(IType type,
- ILaunchConfigurationType configType) {
-
- // Find an existing or create a launch configuration (Standard way)
- ILaunchConfiguration iConf =
- super.findLaunchConfiguration(type, configType);
- if (iConf == null) iConf = super.createConfiguration(type);
- ILaunchConfigurationWorkingCopy iConfWC;
- try {
- /*
- * Tune the default launch configuration: setup run-time classpath
- * manually
- */
- iConfWC = iConf.getWorkingCopy();
-
- iConfWC.setAttribute(
- IJavaLaunchConfigurationConstants.ATTR_DEFAULT_CLASSPATH, false);
-
- List<String> classPath = new ArrayList<String>();
- IResource resource = type.getResource();
- IJavaProject project =
- (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
- IRuntimeClasspathEntry cpEntry =
- JavaRuntime.newDefaultProjectClasspathEntry(project);
- classPath.add(0, cpEntry.getMemento());
-
- iConfWC.setAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH,
- classPath);
-
- } catch (CoreException e) {
- e.printStackTrace();
- // FIXME Error dialog
- return null;
- }
-
- /*
- * Update the selected configuration with a specific Hadoop location
- * target
- */
- IResource resource = type.getResource();
- if (!(resource instanceof IFile))
- return null;
- RunOnHadoopWizard wizard =
- new RunOnHadoopWizard((IFile) resource, iConfWC);
- WizardDialog dialog =
- new WizardDialog(Display.getDefault().getActiveShell(), wizard);
-
- dialog.create();
- dialog.setBlockOnOpen(true);
- if (dialog.open() != WizardDialog.OK)
- return null;
-
- try {
-
- // Only save if some configuration is different.
- if(!iConfWC.contentsEqual(iConf))
- iConfWC.doSave();
-
- } catch (CoreException e) {
- e.printStackTrace();
- // FIXME Error dialog
- return null;
- }
-
- return iConfWC;
- }
-
- /**
- * Was used to run the RunOnHadoopWizard and provide it with a
- * ProgressMonitor
- */
- static class Dialog extends WizardDialog {
- public Dialog(Shell parentShell, IWizard newWizard) {
- super(parentShell, newWizard);
- }
-
- @Override
- public void create() {
- super.create();
-
- ((RunOnHadoopWizard) getWizard())
- .setProgressMonitor(getProgressMonitor());
- }
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/LocalMapReduceLaunchTabGroup.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/LocalMapReduceLaunchTabGroup.java
deleted file mode 100644
index 66db5d2d3cf..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/LocalMapReduceLaunchTabGroup.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.launch;
-
-import org.eclipse.core.runtime.CoreException;
-import org.eclipse.debug.core.ILaunchConfiguration;
-import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
-import org.eclipse.debug.ui.AbstractLaunchConfigurationTab;
-import org.eclipse.debug.ui.AbstractLaunchConfigurationTabGroup;
-import org.eclipse.debug.ui.CommonTab;
-import org.eclipse.debug.ui.ILaunchConfigurationDialog;
-import org.eclipse.debug.ui.ILaunchConfigurationTab;
-import org.eclipse.jdt.core.IType;
-import org.eclipse.jdt.core.JavaModelException;
-import org.eclipse.jdt.core.dom.AST;
-import org.eclipse.jdt.core.search.SearchEngine;
-import org.eclipse.jdt.debug.ui.launchConfigurations.JavaArgumentsTab;
-import org.eclipse.jdt.debug.ui.launchConfigurations.JavaClasspathTab;
-import org.eclipse.jdt.debug.ui.launchConfigurations.JavaJRETab;
-import org.eclipse.jdt.ui.IJavaElementSearchConstants;
-import org.eclipse.jdt.ui.JavaUI;
-import org.eclipse.jface.dialogs.ProgressMonitorDialog;
-import org.eclipse.jface.window.Window;
-import org.eclipse.swt.SWT;
-import org.eclipse.swt.layout.GridData;
-import org.eclipse.swt.layout.GridLayout;
-import org.eclipse.swt.widgets.Button;
-import org.eclipse.swt.widgets.Composite;
-import org.eclipse.swt.widgets.Event;
-import org.eclipse.swt.widgets.Label;
-import org.eclipse.swt.widgets.Listener;
-import org.eclipse.swt.widgets.Text;
-import org.eclipse.ui.dialogs.SelectionDialog;
-
-/**
- *
- * Handler for Local MapReduce job launches
- *
- * TODO(jz) this may not be needed as we almost always deploy to a remote server
- * rather than locally; when we do run locally we may just be able to exec
- * scripts without going through Java
- *
- */
-public class LocalMapReduceLaunchTabGroup extends
- AbstractLaunchConfigurationTabGroup {
-
- public LocalMapReduceLaunchTabGroup() {
- // TODO Auto-generated constructor stub
- }
-
- public void createTabs(ILaunchConfigurationDialog dialog, String mode) {
- setTabs(new ILaunchConfigurationTab[] { new MapReduceLaunchTab(),
- new JavaArgumentsTab(), new JavaJRETab(), new JavaClasspathTab(),
- new CommonTab() });
- }
-
- public static class MapReduceLaunchTab extends AbstractLaunchConfigurationTab {
- private Text combinerClass;
-
- private Text reducerClass;
-
- private Text mapperClass;
-
- @Override
- public boolean canSave() {
- return true;
- }
-
- @Override
- public boolean isValid(ILaunchConfiguration launchConfig) {
- // todo: only if all classes are of proper types
- return true;
- }
-
- public void createControl(final Composite parent) {
- Composite panel = new Composite(parent, SWT.NONE);
- GridLayout layout = new GridLayout(3, false);
- panel.setLayout(layout);
-
- Label mapperLabel = new Label(panel, SWT.NONE);
- mapperLabel.setText("Mapper");
- mapperClass = new Text(panel, SWT.SINGLE | SWT.BORDER);
- createRow(parent, panel, mapperClass);
-
- Label reducerLabel = new Label(panel, SWT.NONE);
- reducerLabel.setText("Reducer");
- reducerClass = new Text(panel, SWT.SINGLE | SWT.BORDER);
- createRow(parent, panel, reducerClass);
-
- Label combinerLabel = new Label(panel, SWT.NONE);
- combinerLabel.setText("Combiner");
- combinerClass = new Text(panel, SWT.SINGLE | SWT.BORDER);
- createRow(parent, panel, combinerClass);
-
- panel.pack();
- setControl(panel);
- }
-
- private void createRow(final Composite parent, Composite panel,
- final Text text) {
- text.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
- Button button = new Button(panel, SWT.BORDER);
- button.setText("Browse...");
- button.addListener(SWT.Selection, new Listener() {
- public void handleEvent(Event arg0) {
- try {
- AST ast = AST.newAST(3);
-
- SelectionDialog dialog = JavaUI.createTypeDialog(parent.getShell(),
- new ProgressMonitorDialog(parent.getShell()), SearchEngine
- .createWorkspaceScope(),
- IJavaElementSearchConstants.CONSIDER_CLASSES, false);
- dialog.setMessage("Select Mapper type");
- dialog.setBlockOnOpen(true);
- dialog.setTitle("Select Mapper Type");
- dialog.open();
-
- if ((dialog.getReturnCode() == Window.OK)
- && (dialog.getResult().length > 0)) {
- IType type = (IType) dialog.getResult()[0];
- text.setText(type.getFullyQualifiedName());
- setDirty(true);
- }
- } catch (JavaModelException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- }
- });
- }
-
- public String getName() {
- return "Hadoop";
- }
-
- public void initializeFrom(ILaunchConfiguration configuration) {
- try {
- mapperClass.setText(configuration.getAttribute(
- "org.apache.hadoop.eclipse.launch.mapper", ""));
- reducerClass.setText(configuration.getAttribute(
- "org.apache.hadoop.eclipse.launch.reducer", ""));
- combinerClass.setText(configuration.getAttribute(
- "org.apache.hadoop.eclipse.launch.combiner", ""));
- } catch (CoreException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- setErrorMessage(e.getMessage());
- }
- }
-
- public void performApply(ILaunchConfigurationWorkingCopy configuration) {
- configuration.setAttribute("org.apache.hadoop.eclipse.launch.mapper",
- mapperClass.getText());
- configuration.setAttribute(
- "org.apache.hadoop.eclipse.launch.reducer", reducerClass
- .getText());
- configuration.setAttribute(
- "org.apache.hadoop.eclipse.launch.combiner", combinerClass
- .getText());
- }
-
- public void setDefaults(ILaunchConfigurationWorkingCopy configuration) {
-
- }
- }
-}
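
For context: the tab above persists the chosen class names under the three "org.apache.hadoop.eclipse.launch.*" attribute keys. A minimal, hedged sketch of how a launch delegate would read them back (the helper class name is illustrative, not part of the plugin):

    import org.eclipse.core.runtime.CoreException;
    import org.eclipse.debug.core.ILaunchConfiguration;

    // Illustrative helper, not part of the deleted plugin sources.
    public class MapReduceLaunchAttributes {
      public static String mapper(ILaunchConfiguration config) throws CoreException {
        return config.getAttribute("org.apache.hadoop.eclipse.launch.mapper", "");
      }
      public static String reducer(ILaunchConfiguration config) throws CoreException {
        return config.getAttribute("org.apache.hadoop.eclipse.launch.reducer", "");
      }
      public static String combiner(ILaunchConfiguration config) throws CoreException {
        return config.getAttribute("org.apache.hadoop.eclipse.launch.combiner", "");
      }
    }
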
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/MutexRule.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/MutexRule.java
deleted file mode 100644
index 46df4491056..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/MutexRule.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.launch;
-
-import org.eclipse.core.runtime.jobs.ISchedulingRule;
-
-public class MutexRule implements ISchedulingRule {
- private final String id;
-
- public MutexRule(String id) {
- this.id = id;
- }
-
- public boolean contains(ISchedulingRule rule) {
- return (rule instanceof MutexRule) && ((MutexRule) rule).id.equals(id);
- }
-
- public boolean isConflicting(ISchedulingRule rule) {
- return (rule instanceof MutexRule) && ((MutexRule) rule).id.equals(id);
- }
-}
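
For context: MutexRule is a plain Eclipse ISchedulingRule keyed by a string, so any two Jobs carrying rules built from the same id run serially. A minimal usage sketch, assuming the rule is attached via Job.setRule (job name and rule id are illustrative):

    import org.apache.hadoop.eclipse.launch.MutexRule;
    import org.eclipse.core.runtime.IProgressMonitor;
    import org.eclipse.core.runtime.IStatus;
    import org.eclipse.core.runtime.Status;
    import org.eclipse.core.runtime.jobs.Job;

    // Illustrative usage, not part of the deleted plugin sources.
    public class MutexRuleUsage {
      public static void scheduleSerializedJob() {
        Job upload = new Job("Upload to DFS") {        // illustrative job name
          @Override
          protected IStatus run(IProgressMonitor monitor) {
            // ... work that must not run concurrently with other uploads ...
            return Status.OK_STATUS;
          }
        };
        // Jobs whose rules are built from the same id never overlap.
        upload.setRule(new MutexRule("dfs.upload"));   // illustrative rule id
        upload.schedule();
      }
    }
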
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/StartHadoopLaunchTabGroup.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/StartHadoopLaunchTabGroup.java
deleted file mode 100644
index 047ba179a61..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/StartHadoopLaunchTabGroup.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.launch;
-
-import org.eclipse.debug.ui.AbstractLaunchConfigurationTabGroup;
-import org.eclipse.debug.ui.CommonTab;
-import org.eclipse.debug.ui.ILaunchConfigurationDialog;
-import org.eclipse.debug.ui.ILaunchConfigurationTab;
-import org.eclipse.jdt.debug.ui.launchConfigurations.JavaArgumentsTab;
-import org.eclipse.jdt.debug.ui.launchConfigurations.JavaClasspathTab;
-import org.eclipse.jdt.debug.ui.launchConfigurations.JavaJRETab;
-
-/**
- * Create the tab group for the dialog window for starting a Hadoop job.
- */
-
-public class StartHadoopLaunchTabGroup extends
- AbstractLaunchConfigurationTabGroup {
-
- public StartHadoopLaunchTabGroup() {
- // TODO Auto-generated constructor stub
- }
-
- /**
- * TODO(jz) consider the appropriate tabs for this case
- */
- public void createTabs(ILaunchConfigurationDialog dialog, String mode) {
- setTabs(new ILaunchConfigurationTab[] { new JavaArgumentsTab(),
- new JavaJRETab(), new JavaClasspathTab(), new CommonTab() });
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/MapReducePreferencePage.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/MapReducePreferencePage.java
deleted file mode 100644
index cef50a3475b..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/MapReducePreferencePage.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.eclipse.preferences;
-
-import org.apache.hadoop.eclipse.Activator;
-import org.eclipse.jface.preference.DirectoryFieldEditor;
-import org.eclipse.jface.preference.FieldEditorPreferencePage;
-import org.eclipse.ui.IWorkbench;
-import org.eclipse.ui.IWorkbenchPreferencePage;
-
-/**
- * This class represents a preference page that is contributed to the
- * Preferences dialog. By sub-classing FieldEditorPreferencePage,
- * we can use the field support built into JFace that allows us to create a
- * page that is small and knows how to save, restore and apply itself.
- *
- *
- * This page is used to modify preferences only. They are stored in the
- * preference store that belongs to the main plug-in class. That way,
- * preferences can be accessed directly via the preference store.
- */
-
-public class MapReducePreferencePage extends FieldEditorPreferencePage
- implements IWorkbenchPreferencePage {
-
- public MapReducePreferencePage() {
- super(GRID);
- setPreferenceStore(Activator.getDefault().getPreferenceStore());
- setTitle("Hadoop Map/Reduce Tools");
- // setDescription("Hadoop Map/Reduce Preferences");
- }
-
- /**
- * Creates the field editors. Field editors are abstractions of the common
- * GUI blocks needed to manipulate various types of preferences. Each field
- * editor knows how to save and restore itself.
- */
- @Override
- public void createFieldEditors() {
- addField(new DirectoryFieldEditor(PreferenceConstants.P_PATH,
- "&Hadoop installation directory:", getFieldEditorParent()));
-
- }
-
- /* @inheritDoc */
- public void init(IWorkbench workbench) {
- }
-
-}
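
For context: since the page stores its value in the plug-in's preference store, other parts of the plugin can read the configured installation directory back directly. A hedged sketch (the lookup helper is illustrative):

    import org.apache.hadoop.eclipse.Activator;
    import org.apache.hadoop.eclipse.preferences.PreferenceConstants;
    import org.eclipse.jface.preference.IPreferenceStore;

    // Illustrative helper, not part of the deleted plugin sources.
    public class HadoopHomeLookup {
      /** Returns the Hadoop installation directory set in the preference page, or "" if unset. */
      public static String getHadoopInstallDir() {
        IPreferenceStore store = Activator.getDefault().getPreferenceStore();
        return store.getString(PreferenceConstants.P_PATH);
      }
    }
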
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceConstants.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceConstants.java
deleted file mode 100644
index 74641bb28a7..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceConstants.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.preferences;
-
-/**
- * Constant definitions for plug-in preferences
- */
-public class PreferenceConstants {
-
- public static final String P_PATH = "pathPreference";
-
- // public static final String P_BOOLEAN = "booleanPreference";
- //
- // public static final String P_CHOICE = "choicePreference";
- //
- // public static final String P_STRING = "stringPreference";
- //
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceInitializer.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceInitializer.java
deleted file mode 100644
index 444050a9920..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceInitializer.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.preferences;
-
-import org.eclipse.core.runtime.preferences.AbstractPreferenceInitializer;
-
-/**
- * Class used to initialize default preference values.
- */
-public class PreferenceInitializer extends AbstractPreferenceInitializer {
-
- /* @inheritDoc */
- @Override
- public void initializeDefaultPreferences() {
- }
-
-}
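
For reference, the initializer above is intentionally empty. A typical, purely hypothetical implementation would seed the store defaults like this (the default value shown is an assumption, not plugin behaviour):

    import org.apache.hadoop.eclipse.Activator;
    import org.apache.hadoop.eclipse.preferences.PreferenceConstants;
    import org.eclipse.core.runtime.preferences.AbstractPreferenceInitializer;
    import org.eclipse.jface.preference.IPreferenceStore;

    // Hypothetical variant; the deleted class deliberately sets no defaults.
    public class PreferenceInitializerSketch extends AbstractPreferenceInitializer {
      @Override
      public void initializeDefaultPreferences() {
        IPreferenceStore store = Activator.getDefault().getPreferenceStore();
        store.setDefault(PreferenceConstants.P_PATH, "");   // no install dir by default
      }
    }
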
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java
deleted file mode 100644
index efc441b1d38..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.server;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-
-public enum ConfProp {
- /**
- * Property name for the Hadoop location name
- */
- PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
-
- /**
- * Property name for the master host name (the Job tracker)
- */
- PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
-
- /**
- * Property name for the DFS master host name (the Name node)
- */
- PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
-
- /**
- * Property name for the installation directory on the master node
- */
- // PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
- /**
- * User name to use for Hadoop operations
- */
- PI_USER_NAME(true, "user.name", System.getProperty("user.name",
- "who are you?")),
-
- /**
- * Property name for SOCKS proxy activation
- */
- PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
-
- /**
- * Property name for the SOCKS proxy host
- */
- PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
-
- /**
- * Property name for the SOCKS proxy port
- */
- PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
-
- /**
- * TCP port number for the name node
- */
- PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
-
- /**
- * TCP port number for the job tracker
- */
- PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
-
- /**
- * Are the Map/Reduce and the Distributed FS masters hosted on the same
- * machine?
- */
- PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
-
- /**
- * Property name for naming the job tracker (URI). This property is related
- * to {@link #PI_JOB_TRACKER_HOST}
- */
- JOB_TRACKER_URI(false, "mapreduce.jobtracker.address", "localhost:50020"),
-
- /**
- * Property name for naming the default file system (URI).
- */
- FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
-
- /**
- * Property name for the default socket factory:
- */
- SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default",
- "org.apache.hadoop.net.StandardSocketFactory"),
-
- /**
- * Property name for the SOCKS server URI.
- */
- SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
-
- ;
-
- /**
- * Registry mapping a configuration property name to its ConfProp
- */
- private static Map<String, ConfProp> map;
-
- private static synchronized void registerProperty(String name,
- ConfProp prop) {
-
- if (ConfProp.map == null)
- ConfProp.map = new HashMap<String, ConfProp>();
-
- ConfProp.map.put(name, prop);
- }
-
- public static ConfProp getByName(String propName) {
- return map.get(propName);
- }
-
- public final String name;
-
- public final String defVal;
-
- ConfProp(boolean internal, String name, String defVal) {
- if (internal)
- name = "eclipse.plug-in." + name;
- this.name = name;
- this.defVal = defVal;
-
- ConfProp.registerProperty(name, this);
- }
-
- String get(Configuration conf) {
- return conf.get(name);
- }
-
- void set(Configuration conf, String value) {
- assert value != null;
- conf.set(name, value);
- }
-
-}
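
For context: ConfProp is a thin typed wrapper over a Hadoop Configuration, and the PI_* entries declared with internal = true are silently stored under an "eclipse.plug-in." prefix. Since get()/set() are package-private, the hedged sketch below assumes code living in the same package (class and values are illustrative):

    package org.apache.hadoop.eclipse.server;   // required: get()/set() are package-private

    import org.apache.hadoop.conf.Configuration;

    // Illustrative usage, not part of the deleted plugin sources.
    class ConfPropUsage {
      static void example() {
        Configuration conf = new Configuration();
        // Stored under "eclipse.plug-in.location.name"
        ConfProp.PI_LOCATION_NAME.set(conf, "my cluster");            // illustrative value
        // Stored under the plain Hadoop key "fs.default.name"
        ConfProp.FS_DEFAULT_URI.set(conf, "hdfs://namenode:9000/");   // illustrative URI
        String name = ConfProp.PI_LOCATION_NAME.get(conf);            // "my cluster"
        System.out.println(name);
      }
    }
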
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java
deleted file mode 100644
index c24253e4800..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java
+++ /dev/null
@@ -1,349 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.server;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.RunningJob;
-
-/**
- * Representation of a Map/Reduce running job on a given location
- */
-
-public class HadoopJob {
-
- /**
- * Enum representation of a Job state
- */
- public enum JobState {
- PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(
- JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
-
- final int state;
-
- JobState(int state) {
- this.state = state;
- }
-
- static JobState ofInt(int state) {
- if (state == JobStatus.PREP) {
- return PREPARE;
- }
- else if (state == JobStatus.RUNNING) {
- return RUNNING;
- }
- else if (state == JobStatus.FAILED) {
- return FAILED;
- }
- else if (state == JobStatus.SUCCEEDED) {
- return SUCCEEDED;
- }
- else {
- return null;
- }
- }
- }
-
- /**
- * Location this Job runs on
- */
- private final HadoopServer location;
-
- /**
- * Unique identifier of this Job
- */
- final JobID jobId;
-
- /**
- * Status representation of a running job. This actually contains a
- * reference to a JobClient. Its methods might block.
- */
- RunningJob running;
-
- /**
- * Last polled status
- *
- * @deprecated should apparently not be used
- */
- JobStatus status;
-
- /**
- * Last polled counters
- */
- Counters counters;
-
- /**
- * Job Configuration
- */
- JobConf jobConf = null;
-
- boolean completed = false;
-
- boolean successful = false;
-
- boolean killed = false;
-
- int totalMaps;
-
- int totalReduces;
-
- int completedMaps;
-
- int completedReduces;
-
- float mapProgress;
-
- float reduceProgress;
-
- /**
- * Constructor for a Hadoop job representation
- *
- * @param location
- * @param id
- * @param running
- * @param status
- */
- public HadoopJob(HadoopServer location, JobID id, RunningJob running,
- JobStatus status) {
-
- this.location = location;
- this.jobId = id;
- this.running = running;
-
- loadJobFile();
-
- update(status);
- }
-
- /**
- * Try to locate and load the JobConf file for this job to get more
- * details on the job (number of maps and reduces)
- */
- private void loadJobFile() {
- try {
- String jobFile = getJobFile();
- FileSystem fs = location.getDFS();
- File tmp = File.createTempFile(getJobID().toString(), ".xml");
- if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location
- .getConfiguration())) {
- this.jobConf = new JobConf(tmp.toString());
-
- this.totalMaps = jobConf.getNumMapTasks();
- this.totalReduces = jobConf.getNumReduceTasks();
- }
-
- } catch (IOException ioe) {
- ioe.printStackTrace();
- }
- }
-
- /* @inheritDoc */
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
- result = prime * result + ((location == null) ? 0 : location.hashCode());
- return result;
- }
-
- /* @inheritDoc */
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (!(obj instanceof HadoopJob))
- return false;
- final HadoopJob other = (HadoopJob) obj;
- if (jobId == null) {
- if (other.jobId != null)
- return false;
- } else if (!jobId.equals(other.jobId))
- return false;
- if (location == null) {
- if (other.location != null)
- return false;
- } else if (!location.equals(other.location))
- return false;
- return true;
- }
-
- /**
- * Get the running state of the Job (see {@link JobState}).
- *
- * @return the current state of the job
- */
- public JobState getState() {
- if (this.completed) {
- if (this.successful) {
- return JobState.SUCCEEDED;
- } else {
- return JobState.FAILED;
- }
- } else {
- return JobState.RUNNING;
- }
- // return JobState.ofInt(this.status.getRunState());
- }
-
- /**
- * @return the unique identifier of this job
- */
- public JobID getJobID() {
- return this.jobId;
- }
-
- /**
- * @return the location this job runs on
- */
- public HadoopServer getLocation() {
- return this.location;
- }
-
- /**
- * @return whether this job has completed
- */
- public boolean isCompleted() {
- return this.completed;
- }
-
- /**
- * @return the name of this job
- */
- public String getJobName() {
- return this.running.getJobName();
- }
-
- /**
- * @return the path of the job configuration file
- */
- public String getJobFile() {
- return this.running.getJobFile();
- }
-
- /**
- * Return the tracking URL for this Job.
- *
- * @return string representation of the tracking URL for this Job
- */
- public String getTrackingURL() {
- return this.running.getTrackingURL();
- }
-
- /**
- * Returns a string representation of this job status
- *
- * @return string representation of this job status
- */
- public String getStatus() {
-
- StringBuffer s = new StringBuffer();
-
- s.append("Maps : " + completedMaps + "/" + totalMaps);
- s.append(" (" + mapProgress + ")");
- s.append(" Reduces : " + completedReduces + "/" + totalReduces);
- s.append(" (" + reduceProgress + ")");
-
- return s.toString();
- }
-
- /**
- * Update this job status according to the given JobStatus
- *
- * @param status
- */
- void update(JobStatus status) {
- this.status = status;
- try {
- this.counters = running.getCounters();
- this.completed = running.isComplete();
- this.successful = running.isSuccessful();
- this.mapProgress = running.mapProgress();
- this.reduceProgress = running.reduceProgress();
- // running.getTaskCompletionEvents(fromEvent);
-
- } catch (IOException ioe) {
- ioe.printStackTrace();
- }
-
- this.completedMaps = (int) (this.totalMaps * this.mapProgress);
- this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
- }
-
- /**
- * Print this job's counters (for debugging purposes)
- */
- void printCounters() {
- System.out.printf("New Job:\n");
- for (String groupName : counters.getGroupNames()) {
- Counters.Group group = counters.getGroup(groupName);
- System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
-
- for (Counters.Counter counter : group) {
- System.out.printf("\t\t%s: %s\n", counter.getDisplayName(),
- counter.getCounter());
- }
- }
- System.out.printf("\n");
- }
-
- /**
- * Kill this job
- */
- public void kill() {
- try {
- this.running.killJob();
- this.killed = true;
-
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
- /**
- * Print this job status (for debugging purposes)
- */
- public void display() {
- System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
- System.out.printf("Configuration file: %s\n", getJobFile());
- System.out.printf("Tracking URL: %s\n", getTrackingURL());
-
- System.out.printf("Completion: map: %f reduce %f\n",
- 100.0 * this.mapProgress, 100.0 * this.reduceProgress);
-
- System.out.println("Job total maps = " + totalMaps);
- System.out.println("Job completed maps = " + completedMaps);
- System.out.println("Map percentage complete = " + mapProgress);
- System.out.println("Job total reduces = " + totalReduces);
- System.out.println("Job completed reduces = " + completedReduces);
- System.out.println("Reduce percentage complete = " + reduceProgress);
- System.out.flush();
- }
-
-}
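
For context: a HadoopJob is built from a JobClient's view of a running job and then refreshed via update(). A hedged sketch of that flow, mirroring what the LocationStatusUpdater in the HadoopServer.java deletion below does (the class and method names of the sketch are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.eclipse.server.HadoopJob;
    import org.apache.hadoop.eclipse.server.HadoopServer;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobStatus;
    import org.apache.hadoop.mapred.RunningJob;

    // Illustrative sketch, not part of the deleted plugin sources.
    public class JobPollSketch {
      public static void pollOnce(HadoopServer location) throws IOException {
        JobClient client = location.getJobClient();
        for (JobStatus status : client.jobsToComplete()) {
          RunningJob running = client.getJob(status.getJobID());
          HadoopJob job = new HadoopJob(location, status.getJobID(), running, status);
          System.out.println(job.getJobID() + " [" + job.getState() + "] " + job.getStatus());
        }
      }
    }
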
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopPathPage.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopPathPage.java
deleted file mode 100644
index cf58b9c25c8..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopPathPage.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.server;
-
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.swt.graphics.Image;
-import org.eclipse.swt.widgets.Composite;
-import org.eclipse.ui.IEditorInput;
-import org.eclipse.ui.IEditorPart;
-import org.eclipse.ui.IEditorSite;
-import org.eclipse.ui.IPropertyListener;
-import org.eclipse.ui.IWorkbenchPartSite;
-import org.eclipse.ui.PartInitException;
-
-public class HadoopPathPage implements IEditorPart {
-
- public IEditorInput getEditorInput() {
- // TODO Auto-generated method stub
- return null;
- }
-
- public IEditorSite getEditorSite() {
- // TODO Auto-generated method stub
- return null;
- }
-
- public void init(IEditorSite site, IEditorInput input)
- throws PartInitException {
- // TODO Auto-generated method stub
-
- }
-
- public void addPropertyListener(IPropertyListener listener) {
- // TODO Auto-generated method stub
-
- }
-
- public void createPartControl(Composite parent) {
- // TODO Auto-generated method stub
-
- }
-
- public void dispose() {
- // TODO Auto-generated method stub
-
- }
-
- public IWorkbenchPartSite getSite() {
- // TODO Auto-generated method stub
- return null;
- }
-
- public String getTitle() {
- // TODO Auto-generated method stub
- return null;
- }
-
- public Image getTitleImage() {
- // TODO Auto-generated method stub
- return null;
- }
-
- public String getTitleToolTip() {
- // TODO Auto-generated method stub
- return null;
- }
-
- public void removePropertyListener(IPropertyListener listener) {
- // TODO Auto-generated method stub
-
- }
-
- public void setFocus() {
- // TODO Auto-generated method stub
-
- }
-
- public Object getAdapter(Class adapter) {
- // TODO Auto-generated method stub
- return null;
- }
-
- public void doSave(IProgressMonitor monitor) {
- // TODO Auto-generated method stub
-
- }
-
- public void doSaveAs() {
- // TODO Auto-generated method stub
-
- }
-
- public boolean isDirty() {
- // TODO Auto-generated method stub
- return false;
- }
-
- public boolean isSaveAsAllowed() {
- // TODO Auto-generated method stub
- return false;
- }
-
- public boolean isSaveOnCloseNeeded() {
- // TODO Auto-generated method stub
- return false;
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java
deleted file mode 100644
index f7252d504bb..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java
+++ /dev/null
@@ -1,517 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.server;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.logging.Logger;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.eclipse.Activator;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.RunningJob;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.IStatus;
-import org.eclipse.core.runtime.Status;
-import org.eclipse.core.runtime.jobs.Job;
-import org.eclipse.swt.widgets.Display;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
-
-/**
- * Representation of a Hadoop location, i.e. the master node (NameNode,
- * JobTracker).
- *
- *
- * This class does not create any SSH connection anymore. Tunneling must be
- * setup outside of Eclipse for now (using Putty or ssh -D<port>
- * <host>)
- *
- *
- * TODO
- *
- * - Disable the updater if a location becomes unreachable or fails for
- * too long
- * - Stop the updater on location's disposal/removal
- */
-
-public class HadoopServer {
-
- /**
- * Frequency of location status observations expressed as the delay in ms
- * between each observation
- *
- * TODO Add a preference parameter for this
- */
- protected static final long STATUS_OBSERVATION_DELAY = 1500;
-
- /**
- * Background job that periodically polls this location for the status of
- * its running jobs and notifies the registered job listeners.
- */
- public class LocationStatusUpdater extends Job {
-
- JobClient client = null;
-
- /**
- * Setup the updater
- */
- public LocationStatusUpdater() {
- super("Map/Reduce location status updater");
- this.setSystem(true);
- }
-
- /* @inheritDoc */
- @Override
- protected IStatus run(IProgressMonitor monitor) {
- if (client == null) {
- try {
- client = HadoopServer.this.getJobClient();
-
- } catch (IOException ioe) {
- client = null;
- return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
- "Cannot connect to the Map/Reduce location: "
- + HadoopServer.this.getLocationName(),
- ioe);
- }
- }
-
- try {
- // Set of all known existing Job IDs we want fresh info of
- Set<JobID> missingJobIds =
- new HashSet<JobID>(runningJobs.keySet());
-
- JobStatus[] jstatus = client.jobsToComplete();
- for (JobStatus status : jstatus) {
-
- JobID jobId = status.getJobID();
- missingJobIds.remove(jobId);
-
- HadoopJob hJob;
- synchronized (HadoopServer.this.runningJobs) {
- hJob = runningJobs.get(jobId);
- if (hJob == null) {
- // Unknown job, create an entry
- RunningJob running = client.getJob(jobId);
- hJob =
- new HadoopJob(HadoopServer.this, jobId, running, status);
- newJob(hJob);
- }
- }
-
- // Update HadoopJob with fresh info
- updateJob(hJob, status);
- }
-
- // Ask explicitly for fresh info for these Job IDs
- for (JobID jobId : missingJobIds) {
- HadoopJob hJob = runningJobs.get(jobId);
- if (!hJob.isCompleted())
- updateJob(hJob, null);
- }
-
- } catch (IOException ioe) {
- client = null;
- return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
- "Cannot retrieve running Jobs on location: "
- + HadoopServer.this.getLocationName(), ioe);
- }
-
- // Schedule the next observation
- schedule(STATUS_OBSERVATION_DELAY);
-
- return Status.OK_STATUS;
- }
-
- /**
- * Stores the new job and makes it available to listeners
- *
- * @param data the job to add
- */
- private void newJob(final HadoopJob data) {
- runningJobs.put(data.getJobID(), data);
-
- Display.getDefault().asyncExec(new Runnable() {
- public void run() {
- fireJobAdded(data);
- }
- });
- }
-
- /**
- * Updates the status of a job
- *
- * @param job the job to update
- * @param status the last polled status (may be null)
- */
- private void updateJob(final HadoopJob job, JobStatus status) {
- job.update(status);
-
- Display.getDefault().asyncExec(new Runnable() {
- public void run() {
- fireJobChanged(job);
- }
- });
- }
-
- }
-
- static Logger log = Logger.getLogger(HadoopServer.class.getName());
-
- /**
- * Hadoop configuration of the location. Also contains specific parameters
- * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
- */
- private Configuration conf;
-
- /**
- * Jobs listeners
- */
- private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
-
- /**
- * Jobs running on this location. The keys of this map are the Job IDs.
- */
- private transient Map<JobID, HadoopJob> runningJobs =
- Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
-
- /**
- * Status updater for this location
- */
- private LocationStatusUpdater statusUpdater;
-
- // state and status - transient
- private transient String state = "";
-
- /**
- * Creates a new default Hadoop location
- */
- public HadoopServer() {
- this.conf = new Configuration();
- this.addPluginConfigDefaultProperties();
- }
-
- /**
- * Creates a location from a file
- *
- * @throws IOException
- * @throws SAXException
- * @throws ParserConfigurationException
- */
- public HadoopServer(File file) throws ParserConfigurationException,
- SAXException, IOException {
-
- this.conf = new Configuration();
- this.addPluginConfigDefaultProperties();
- this.loadFromXML(file);
- }
-
- /**
- * Create a new Hadoop location by copying an already existing one.
- *
- * @param existing the location to copy
- */
- public HadoopServer(HadoopServer existing) {
- this();
- this.load(existing);
- }
-
- public void addJobListener(IJobListener l) {
- jobListeners.add(l);
- }
-
- public void dispose() {
- // TODO close DFS connections?
- }
-
- /**
- * List all elements that should be present in the Server window (all
- * servers and all jobs running on each server)
- *
- * @return collection of jobs for this location
- */
- public Collection getJobs() {
- startStatusUpdater();
- return this.runningJobs.values();
- }
-
- /**
- * Remove the given job from the currently running jobs map
- *
- * @param job the job to remove
- */
- public void purgeJob(final HadoopJob job) {
- runningJobs.remove(job.getJobID());
- Display.getDefault().asyncExec(new Runnable() {
- public void run() {
- fireJobRemoved(job);
- }
- });
- }
-
- /**
- * Returns the {@link Configuration} defining this location.
- *
- * @return the location configuration
- */
- public Configuration getConfiguration() {
- return this.conf;
- }
-
- /**
- * Gets a Hadoop configuration property value
- *
- * @param prop the configuration property
- * @return the property value
- */
- public String getConfProp(ConfProp prop) {
- return prop.get(conf);
- }
-
- /**
- * Gets a Hadoop configuration property value
- *
- * @param propName the property name
- * @return the property value
- */
- public String getConfProp(String propName) {
- return this.conf.get(propName);
- }
-
- public String getLocationName() {
- return ConfProp.PI_LOCATION_NAME.get(conf);
- }
-
- /**
- * Returns the master host name of the Hadoop location (the Job tracker)
- *
- * @return the host name of the Job tracker
- */
- public String getMasterHostName() {
- return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
- }
-
- public String getState() {
- return state;
- }
-
- /**
- * Overwrite this location with the given existing location
- *
- * @param existing the existing location
- */
- public void load(HadoopServer existing) {
- this.conf = new Configuration(existing.conf);
- }
-
- /**
- * Overwrite this location with settings available in the given XML file.
- * The existing configuration is preserved if the XML file is invalid.
- *
- * @param file the file path of the XML file
- * @return validity of the XML file
- * @throws ParserConfigurationException
- * @throws IOException
- * @throws SAXException
- */
- public boolean loadFromXML(File file) throws ParserConfigurationException,
- SAXException, IOException {
-
- Configuration newConf = new Configuration(this.conf);
-
- DocumentBuilder builder =
- DocumentBuilderFactory.newInstance().newDocumentBuilder();
- Document document = builder.parse(file);
-
- Element root = document.getDocumentElement();
- if (!"configuration".equals(root.getTagName()))
- return false;
- NodeList props = root.getChildNodes();
- for (int i = 0; i < props.getLength(); i++) {
- Node propNode = props.item(i);
- if (!(propNode instanceof Element))
- continue;
- Element prop = (Element) propNode;
- if (!"property".equals(prop.getTagName()))
- return false;
- NodeList fields = prop.getChildNodes();
- String attr = null;
- String value = null;
- for (int j = 0; j < fields.getLength(); j++) {
- Node fieldNode = fields.item(j);
- if (!(fieldNode instanceof Element))
- continue;
- Element field = (Element) fieldNode;
- if ("name".equals(field.getTagName()))
- attr = ((Text) field.getFirstChild()).getData();
- if ("value".equals(field.getTagName()) && field.hasChildNodes())
- value = ((Text) field.getFirstChild()).getData();
- }
- if (attr != null && value != null)
- newConf.set(attr, value);
- }
-
- this.conf = newConf;
- return true;
- }
-
- /**
- * Sets a Hadoop configuration property value
- *
- * @param prop the property
- * @param propValue the property value
- */
- public void setConfProp(ConfProp prop, String propValue) {
- prop.set(conf, propValue);
- }
-
- /**
- * Sets a Hadoop configuration property value
- *
- * @param propName the property name
- * @param propValue the property value
- */
- public void setConfProp(String propName, String propValue) {
- this.conf.set(propName, propValue);
- }
-
- public void setLocationName(String newName) {
- ConfProp.PI_LOCATION_NAME.set(conf, newName);
- }
-
- /**
- * Write this location's settings to the given file
- *
- * @param file the destination file
- * @throws IOException
- */
- public void storeSettingsToFile(File file) throws IOException {
- FileOutputStream fos = new FileOutputStream(file);
- try {
- this.conf.writeXml(fos);
- fos.close();
- fos = null;
- } finally {
- IOUtils.closeStream(fos);
- }
-
- }
-
- /* @inheritDoc */
- @Override
- public String toString() {
- return this.getLocationName();
- }
-
- /**
- * Fill the configuration with valid default values
- */
- private void addPluginConfigDefaultProperties() {
- for (ConfProp prop : ConfProp.values()) {
- if (conf.get(prop.name) == null)
- conf.set(prop.name, prop.defVal);
- }
- }
-
- /**
- * Starts the location status updater
- */
- private synchronized void startStatusUpdater() {
- if (statusUpdater == null) {
- statusUpdater = new LocationStatusUpdater();
- statusUpdater.schedule();
- }
- }
-
- /*
- * Rewrite of the connecting and tunneling to the Hadoop location
- */
-
- /**
- * Provides access to the default file system of this location.
- *
- * @return a {@link FileSystem}
- */
- public FileSystem getDFS() throws IOException {
- return FileSystem.get(this.conf);
- }
-
- /**
- * Provides access to the Job tracking system of this location
- *
- * @return a {@link JobClient}
- */
- public JobClient getJobClient() throws IOException {
- JobConf jconf = new JobConf(this.conf);
- return new JobClient(jconf);
- }
-
- /*
- * Listeners handling
- */
-
- protected void fireJarPublishDone(JarModule jar) {
- for (IJobListener listener : jobListeners) {
- listener.publishDone(jar);
- }
- }
-
- protected void fireJarPublishStart(JarModule jar) {
- for (IJobListener listener : jobListeners) {
- listener.publishStart(jar);
- }
- }
-
- protected void fireJobAdded(HadoopJob job) {
- for (IJobListener listener : jobListeners) {
- listener.jobAdded(job);
- }
- }
-
- protected void fireJobRemoved(HadoopJob job) {
- for (IJobListener listener : jobListeners) {
- listener.jobRemoved(job);
- }
- }
-
- protected void fireJobChanged(HadoopJob job) {
- for (IJobListener listener : jobListeners) {
- listener.jobChanged(job);
- }
- }
-
-}
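
For context: a hedged sketch of how a location was typically defined and persisted with the API above; loadFromXML() reads the same Hadoop-style configuration file back. Host names, the location name and the helper class are illustrative assumptions:

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.eclipse.server.ConfProp;
    import org.apache.hadoop.eclipse.server.HadoopServer;

    // Illustrative sketch, not part of the deleted plugin sources.
    public class LocationSetupSketch {
      public static HadoopServer defineLocation(File settingsFile) throws IOException {
        HadoopServer location = new HadoopServer();
        location.setLocationName("dev cluster");                           // illustrative
        location.setConfProp(ConfProp.PI_JOB_TRACKER_HOST, "jt.example.com");
        location.setConfProp(ConfProp.PI_NAME_NODE_HOST, "nn.example.com");
        location.storeSettingsToFile(settingsFile);   // written as <configuration> XML
        return location;
      }
    }
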
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java
deleted file mode 100644
index 1668e29622e..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.server;
-
-/**
- * Interface for updating/adding jobs to the MapReduce Server view.
- */
-public interface IJobListener {
-
- void jobChanged(HadoopJob job);
-
- void jobAdded(HadoopJob job);
-
- void jobRemoved(HadoopJob job);
-
- void publishStart(JarModule jar);
-
- void publishDone(JarModule jar);
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java
deleted file mode 100644
index 828e205cefe..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.server;
-
-import java.io.File;
-import java.util.logging.Logger;
-
-import org.apache.hadoop.eclipse.Activator;
-import org.apache.hadoop.eclipse.ErrorMessageDialog;
-import org.eclipse.core.resources.IResource;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.Path;
-import org.eclipse.jdt.core.ICompilationUnit;
-import org.eclipse.jdt.core.IJavaElement;
-import org.eclipse.jdt.core.IType;
-import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
-import org.eclipse.jdt.ui.jarpackager.JarPackageData;
-import org.eclipse.jface.operation.IRunnableWithProgress;
-import org.eclipse.swt.widgets.Display;
-import org.eclipse.ui.PlatformUI;
-
-/**
- * Methods for interacting with the jar file containing the
- * Mapper/Reducer/Driver classes for a MapReduce job.
- */
-
-public class JarModule implements IRunnableWithProgress {
-
- static Logger log = Logger.getLogger(JarModule.class.getName());
-
- private IResource resource;
-
- private File jarFile;
-
- public JarModule(IResource resource) {
- this.resource = resource;
- }
-
- public String getName() {
- return resource.getProject().getName() + "/" + resource.getName();
- }
-
- /**
- * Creates a JAR file containing the resource this module was built from
- * (a Java class with a main() method) and all associated resources. The
- * resulting file can then be retrieved through {@link #getJarFile()}.
- *
- * @param monitor the progress monitor
- */
- public void run(IProgressMonitor monitor) {
-
- log.fine("Build jar");
- JarPackageData jarrer = new JarPackageData();
-
- jarrer.setExportJavaFiles(true);
- jarrer.setExportClassFiles(true);
- jarrer.setExportOutputFolders(true);
- jarrer.setOverwrite(true);
-
- try {
- // IJavaProject project =
- // (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
-
- // check this is the case before letting this method get called
- Object element = resource.getAdapter(IJavaElement.class);
- IType type = ((ICompilationUnit) element).findPrimaryType();
- jarrer.setManifestMainClass(type);
-
- // Create a temporary JAR file name
- File baseDir = Activator.getDefault().getStateLocation().toFile();
-
- String prefix =
- String.format("%s_%s-", resource.getProject().getName(), resource
- .getName());
- File jarFile = File.createTempFile(prefix, ".jar", baseDir);
- jarrer.setJarLocation(new Path(jarFile.getAbsolutePath()));
-
- jarrer.setElements(resource.getProject().members(IResource.FILE));
- IJarExportRunnable runnable =
- jarrer.createJarExportRunnable(Display.getDefault()
- .getActiveShell());
- runnable.run(monitor);
-
- this.jarFile = jarFile;
-
- } catch (Exception e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- }
-
- /**
- * Allow the retrieval of the resulting JAR file
- *
- * @return the generated JAR file
- */
- public File getJarFile() {
- return this.jarFile;
- }
-
- /**
- * Static helper to create a JAR package for the given resource while
- * showing a progress bar
- *
- * @param resource the resource to package
- * @return the created JAR file, or null if the packaging failed
- */
- public static File createJarPackage(IResource resource) {
-
- JarModule jarModule = new JarModule(resource);
- try {
- PlatformUI.getWorkbench().getProgressService().run(false, true,
- jarModule);
-
- } catch (Exception e) {
- e.printStackTrace();
- return null;
- }
-
- File jarFile = jarModule.getJarFile();
- if (jarFile == null) {
- ErrorMessageDialog.display("Run on Hadoop",
- "Unable to create or locate the JAR file for the Job");
- return null;
- }
-
- return jarFile;
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java
deleted file mode 100644
index 8fdd19b005b..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java
+++ /dev/null
@@ -1,972 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.servers;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.eclipse.server.ConfProp;
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.eclipse.jface.dialogs.IMessageProvider;
-import org.eclipse.jface.wizard.WizardPage;
-import org.eclipse.swt.SWT;
-import org.eclipse.swt.custom.ScrolledComposite;
-import org.eclipse.swt.events.ModifyEvent;
-import org.eclipse.swt.events.ModifyListener;
-import org.eclipse.swt.events.SelectionEvent;
-import org.eclipse.swt.events.SelectionListener;
-import org.eclipse.swt.graphics.Image;
-import org.eclipse.swt.layout.GridData;
-import org.eclipse.swt.layout.GridLayout;
-import org.eclipse.swt.widgets.Button;
-import org.eclipse.swt.widgets.Composite;
-import org.eclipse.swt.widgets.Control;
-import org.eclipse.swt.widgets.Display;
-import org.eclipse.swt.widgets.Event;
-import org.eclipse.swt.widgets.Group;
-import org.eclipse.swt.widgets.Label;
-import org.eclipse.swt.widgets.Listener;
-import org.eclipse.swt.widgets.TabFolder;
-import org.eclipse.swt.widgets.TabItem;
-import org.eclipse.swt.widgets.Text;
-
-/**
- * Wizard for editing the settings of a Hadoop location
- *
- * The wizard contains two tabs: General and Advanced. It edits the
- * parameters of the location member, which is either a new location or a
- * copy of an existing registered location.
- */
-
-public class HadoopLocationWizard extends WizardPage {
-
- Image circle;
-
- /**
- * The location effectively edited by the wizard. This location is a copy
- * or a new one.
- */
- private HadoopServer location;
-
- /**
- * The original location being edited by the wizard (null if we create a
- * new instance).
- */
- private HadoopServer original;
-
- /**
- * New Hadoop location wizard
- */
- public HadoopLocationWizard() {
- super("Hadoop Server", "New Hadoop Location", null);
-
- this.original = null;
- this.location = new HadoopServer();
- this.location.setLocationName("");
- }
-
- /**
- * Constructor to edit the parameters of an existing Hadoop server
- *
- * @param server
- */
- public HadoopLocationWizard(HadoopServer server) {
- super("Create a new Hadoop location", "Edit Hadoop Location", null);
-
- this.original = server;
- this.location = new HadoopServer(server);
- }
-
- /**
- * Performs any actions appropriate in response to the user having pressed
-   * the Finish button, or refuses if finishing now is not permitted.
- *
- * @return the created or updated Hadoop location
- */
-
- public HadoopServer performFinish() {
- try {
- if (this.original == null) {
- // New location
- Display.getDefault().syncExec(new Runnable() {
- public void run() {
- ServerRegistry.getInstance().addServer(
- HadoopLocationWizard.this.location);
- }
- });
- return this.location;
-
- } else {
- // Update location
- final String originalName = this.original.getLocationName();
- this.original.load(this.location);
-
- Display.getDefault().syncExec(new Runnable() {
- public void run() {
- ServerRegistry.getInstance().updateServer(originalName,
- HadoopLocationWizard.this.location);
- }
- });
- return this.original;
-
- }
- } catch (Exception e) {
- e.printStackTrace();
- setMessage("Invalid server location values", IMessageProvider.ERROR);
- return null;
- }
- }
-
- /**
-   * Validates the current Hadoop location settings (looks for the Hadoop
-   * installation directory).
- *
- */
- private void testLocation() {
- setMessage("Not implemented yet", IMessageProvider.WARNING);
- }
-
- /**
-   * The location is not complete (and the Finish button is not available)
-   * until the location name, master host name and URIs are valid.
- *
- * @inheritDoc
- */
- @Override
- public boolean isPageComplete() {
-
- {
- String locName = location.getConfProp(ConfProp.PI_LOCATION_NAME);
- if ((locName == null) || (locName.length() == 0)
- || locName.contains("/")) {
-
- setMessage("Bad location name: "
- + "the location name should not contain "
- + "any character prohibited in a file name.", WARNING);
-
- return false;
- }
- }
-
- {
- String master = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
- if ((master == null) || (master.length() == 0)) {
-
- setMessage("Bad master host name: "
- + "the master host name refers to the machine "
- + "that runs the Job tracker.", WARNING);
-
- return false;
- }
- }
-
- {
- String jobTracker = location.getConfProp(ConfProp.JOB_TRACKER_URI);
- String[] strs = jobTracker.split(":");
- boolean ok = (strs.length == 2);
- if (ok) {
- try {
- int port = Integer.parseInt(strs[1]);
- ok = (port >= 0) && (port < 65536);
- } catch (NumberFormatException nfe) {
- ok = false;
- }
- }
- if (!ok) {
- setMessage("The job tracker information ("
- + ConfProp.JOB_TRACKER_URI.name + ") is invalid. "
- + "This usually looks like \"host:port\"", WARNING);
- return false;
- }
- }
-
- {
- String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
-      try {
-        new URI(fsDefaultURI);
-      } catch (URISyntaxException e) {
-
-        setMessage("The default file system URI is invalid. "
-            + "This usually looks like \"hdfs://host:port/\" "
-            + "or \"file:///dir/\"", WARNING);
-        return false;
-      }
- }
-
- setMessage("Define the location of a Hadoop infrastructure "
- + "for running MapReduce applications.");
- return true;
- }
-
- /**
- * Create the wizard
- */
- /* @inheritDoc */
- public void createControl(Composite parent) {
- setTitle("Define Hadoop location");
- setDescription("Define the location of a Hadoop infrastructure "
- + "for running MapReduce applications.");
-
- Composite panel = new Composite(parent, SWT.FILL);
- GridLayout glayout = new GridLayout(2, false);
- panel.setLayout(glayout);
-
- TabMediator mediator = new TabMediator(panel);
- {
- GridData gdata = new GridData(GridData.FILL_BOTH);
- gdata.horizontalSpan = 2;
- mediator.folder.setLayoutData(gdata);
- }
- this.setControl(panel /* mediator.folder */);
- {
- final Button btn = new Button(panel, SWT.NONE);
- btn.setText("&Load from file");
- btn.setEnabled(false);
- btn.setToolTipText("Not yet implemented");
- btn.addListener(SWT.Selection, new Listener() {
- public void handleEvent(Event e) {
- // TODO
- }
- });
- }
- {
- final Button validate = new Button(panel, SWT.NONE);
- validate.setText("&Validate location");
- validate.setEnabled(false);
- validate.setToolTipText("Not yet implemented");
- validate.addListener(SWT.Selection, new Listener() {
- public void handleEvent(Event e) {
- testLocation();
- }
- });
- }
- }
-
- private interface TabListener {
- void notifyChange(ConfProp prop, String propValue);
- }
-
- /*
- * Mediator pattern to keep tabs synchronized with each other and with the
- * location state.
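-   *
-   * Flow: a tab edit calls mediator.notifyChange(), which stores the value
-   * in the location, refreshes the wizard buttons, broadcasts the change to
-   * the other tabs and then derives the dependent properties.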
- */
-
- private class TabMediator {
- TabFolder folder;
-
-    private Set<TabListener> tabs = new HashSet<TabListener>();
-
- TabMediator(Composite parent) {
- folder = new TabFolder(parent, SWT.NONE);
- tabs.add(new TabMain(this));
- tabs.add(new TabAdvanced(this));
- }
-
- /**
- * Access to current configuration settings
- *
- * @param propName the property name
- * @return the current property value
- */
- String get(String propName) {
- return location.getConfProp(propName);
- }
-
- String get(ConfProp prop) {
- return location.getConfProp(prop);
- }
-
- /**
- * Implements change notifications from any tab: update the location
- * state and other tabs
- *
-     * @param source origin of the notification (one of the tabs)
- * @param propName modified property
- * @param propValue new value
- */
- void notifyChange(TabListener source, final ConfProp prop,
- final String propValue) {
- // Ignore notification when no change
- String oldValue = location.getConfProp(prop);
- if ((oldValue != null) && oldValue.equals(propValue))
- return;
-
- location.setConfProp(prop, propValue);
- Display.getDefault().syncExec(new Runnable() {
- public void run() {
- getContainer().updateButtons();
- }
- });
-
- this.fireChange(source, prop, propValue);
-
- /*
- * Now we deal with dependencies between settings
- */
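-      // Derived values kept in sync below: JOB_TRACKER_URI from the job
-      // tracker host/port, FS_DEFAULT_URI from the name node host/port,
-      // SOCKS_SERVER from the proxy host/port, and the reverse splits when
-      // a URI is edited directly.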
- final String jobTrackerHost =
- location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
- final String jobTrackerPort =
- location.getConfProp(ConfProp.PI_JOB_TRACKER_PORT);
- final String nameNodeHost =
- location.getConfProp(ConfProp.PI_NAME_NODE_HOST);
- final String nameNodePort =
- location.getConfProp(ConfProp.PI_NAME_NODE_PORT);
- final boolean colocate =
- location.getConfProp(ConfProp.PI_COLOCATE_MASTERS)
- .equalsIgnoreCase("yes");
- final String jobTrackerURI =
- location.getConfProp(ConfProp.JOB_TRACKER_URI);
- final String fsDefaultURI =
- location.getConfProp(ConfProp.FS_DEFAULT_URI);
- final String socksServerURI =
- location.getConfProp(ConfProp.SOCKS_SERVER);
- final boolean socksProxyEnable =
- location.getConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE)
- .equalsIgnoreCase("yes");
- final String socksProxyHost =
- location.getConfProp(ConfProp.PI_SOCKS_PROXY_HOST);
- final String socksProxyPort =
- location.getConfProp(ConfProp.PI_SOCKS_PROXY_PORT);
-
- Display.getDefault().syncExec(new Runnable() {
- public void run() {
- switch (prop) {
- case PI_JOB_TRACKER_HOST: {
- if (colocate)
- notifyChange(null, ConfProp.PI_NAME_NODE_HOST,
- jobTrackerHost);
- String newJobTrackerURI =
- String.format("%s:%s", jobTrackerHost, jobTrackerPort);
- notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
- break;
- }
- case PI_JOB_TRACKER_PORT: {
- String newJobTrackerURI =
- String.format("%s:%s", jobTrackerHost, jobTrackerPort);
- notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
- break;
- }
- case PI_NAME_NODE_HOST: {
- String newHDFSURI =
- String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
- notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
-
-            // Break colocation if someone forces the DFS Master host
- if (!colocate && !nameNodeHost.equals(jobTrackerHost))
- notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
- break;
- }
- case PI_NAME_NODE_PORT: {
- String newHDFSURI =
- String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
- notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
- break;
- }
- case PI_SOCKS_PROXY_HOST: {
- String newSocksProxyURI =
- String.format("%s:%s", socksProxyHost, socksProxyPort);
- notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
- break;
- }
- case PI_SOCKS_PROXY_PORT: {
- String newSocksProxyURI =
- String.format("%s:%s", socksProxyHost, socksProxyPort);
- notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
- break;
- }
- case JOB_TRACKER_URI: {
- String[] strs = jobTrackerURI.split(":", 2);
- String host = strs[0];
- String port = (strs.length == 2) ? strs[1] : "";
- notifyChange(null, ConfProp.PI_JOB_TRACKER_HOST, host);
- notifyChange(null, ConfProp.PI_JOB_TRACKER_PORT, port);
- break;
- }
- case FS_DEFAULT_URI: {
- try {
- URI uri = new URI(fsDefaultURI);
- if (uri.getScheme().equals("hdfs")) {
- String host = uri.getHost();
- String port = Integer.toString(uri.getPort());
- notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
- notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
- }
- } catch (URISyntaxException use) {
- // Ignore the update!
- }
- break;
- }
- case SOCKS_SERVER: {
- String[] strs = socksServerURI.split(":", 2);
- String host = strs[0];
- String port = (strs.length == 2) ? strs[1] : "";
- notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
- notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
- break;
- }
- case PI_COLOCATE_MASTERS: {
- if (colocate)
- notifyChange(null, ConfProp.PI_NAME_NODE_HOST,
- jobTrackerHost);
- break;
- }
- case PI_SOCKS_PROXY_ENABLE: {
- if (socksProxyEnable) {
- notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT,
- "org.apache.hadoop.net.SocksSocketFactory");
- } else {
- notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT,
- "org.apache.hadoop.net.StandardSocketFactory");
- }
- break;
- }
- }
- }
- });
-
- }
-
- /**
- * Change notifications on properties (by name). A property might not be
- * reflected as a ConfProp enum. If it is, the notification is forwarded
- * to the ConfProp notifyChange method. If not, it is processed here.
- *
-     * @param source the tab that originated the change
-     * @param propName the modified property name
-     * @param propValue the new property value
- */
- void notifyChange(TabListener source, String propName, String propValue) {
-
- ConfProp prop = ConfProp.getByName(propName);
- if (prop != null)
- notifyChange(source, prop, propValue);
-
- location.setConfProp(propName, propValue);
- }
-
- /**
- * Broadcast a property change to all registered tabs. If a tab is
- * identified as the source of the change, this tab will not be notified.
- *
-     * @param source the tab that originated the change (not re-notified)
-     * @param prop the modified property
-     * @param value the new property value
- */
- private void fireChange(TabListener source, ConfProp prop, String value) {
- for (TabListener tab : tabs) {
- if (tab != source)
- tab.notifyChange(prop, value);
- }
- }
-
- }
-
- /**
-   * Create an SWT Text component for the given {@link ConfProp} text
-   * configuration property.
-   *
-   * @param listener the modify listener to attach to the text field
-   * @param parent the SWT parent container
-   * @param prop the configuration property edited by the field
-   * @return the created SWT Text field
- */
- private Text createConfText(ModifyListener listener, Composite parent,
- ConfProp prop) {
-
- Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
- GridData data = new GridData(GridData.FILL_HORIZONTAL);
- text.setLayoutData(data);
- text.setData("hProp", prop);
- text.setText(location.getConfProp(prop));
- text.addModifyListener(listener);
-
- return text;
- }
-
- /**
-   * Create an SWT check Button component for the given {@link ConfProp}
-   * boolean configuration property.
-   *
-   * @param listener the selection listener to attach to the button
-   * @param parent the SWT parent container
-   * @param prop the boolean configuration property edited by the button
-   * @param text the button label
-   * @return the created SWT Button
- */
- private Button createConfCheckButton(SelectionListener listener,
- Composite parent, ConfProp prop, String text) {
-
- Button button = new Button(parent, SWT.CHECK);
- button.setText(text);
- button.setData("hProp", prop);
- button.setSelection(location.getConfProp(prop).equalsIgnoreCase("yes"));
- button.addSelectionListener(listener);
-
- return button;
- }
-
- /**
- * Create editor entry for the given configuration property. The editor is
- * a couple (Label, Text).
- *
- * @param listener the listener to trigger on property change
- * @param parent the SWT parent container
- * @param prop the property to create an editor for
-   * @param labelText a label (null defaults to the property name)
- *
- * @return a SWT Text field
- */
- private Text createConfLabelText(ModifyListener listener,
- Composite parent, ConfProp prop, String labelText) {
-
- Label label = new Label(parent, SWT.NONE);
- if (labelText == null)
- labelText = prop.name;
- label.setText(labelText);
-
- return createConfText(listener, parent, prop);
- }
-
- /**
- * Create an editor entry for the given configuration name
- *
- * @param listener the listener to trigger on property change
- * @param parent the SWT parent container
- * @param propName the name of the property to create an editor for
-   * @param labelText a label (null defaults to the property name)
- *
- * @return a SWT Text field
- */
- private Text createConfNameEditor(ModifyListener listener,
- Composite parent, String propName, String labelText) {
-
- {
- ConfProp prop = ConfProp.getByName(propName);
- if (prop != null)
- return createConfLabelText(listener, parent, prop, labelText);
- }
-
- Label label = new Label(parent, SWT.NONE);
- if (labelText == null)
- labelText = propName;
- label.setText(labelText);
-
- Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
- GridData data = new GridData(GridData.FILL_HORIZONTAL);
- text.setLayoutData(data);
- text.setData("hPropName", propName);
- text.setText(location.getConfProp(propName));
- text.addModifyListener(listener);
-
- return text;
- }
-
- /**
-   * Main parameters of the Hadoop location:
-   * - host and port of the Map/Reduce master (Job tracker)
-   * - host and port of the DFS master (Name node)
-   * - SOCKS proxy settings
- */
- private class TabMain implements TabListener, ModifyListener,
- SelectionListener {
-
- TabMediator mediator;
-
- Text locationName;
-
- Text textJTHost;
-
- Text textNNHost;
-
- Button colocateMasters;
-
- Text textJTPort;
-
- Text textNNPort;
-
- Text userName;
-
- Button useSocksProxy;
-
- Text socksProxyHost;
-
- Text socksProxyPort;
-
- TabMain(TabMediator mediator) {
- this.mediator = mediator;
- TabItem tab = new TabItem(mediator.folder, SWT.NONE);
- tab.setText("General");
- tab.setToolTipText("General location parameters");
- tab.setImage(circle);
- tab.setControl(createControl(mediator.folder));
- }
-
- private Control createControl(Composite parent) {
-
- Composite panel = new Composite(parent, SWT.FILL);
- panel.setLayout(new GridLayout(2, false));
-
- GridData data;
-
- /*
- * Location name
- */
- {
- Composite subpanel = new Composite(panel, SWT.FILL);
- subpanel.setLayout(new GridLayout(2, false));
- data = new GridData();
- data.horizontalSpan = 2;
- data.horizontalAlignment = SWT.FILL;
- subpanel.setLayoutData(data);
-
- locationName =
- createConfLabelText(this, subpanel, ConfProp.PI_LOCATION_NAME,
- "&Location name:");
- }
-
- /*
- * Map/Reduce group
- */
- {
- Group groupMR = new Group(panel, SWT.SHADOW_NONE);
- groupMR.setText("Map/Reduce Master");
- groupMR.setToolTipText("Address of the Map/Reduce master node "
- + "(the Job Tracker).");
- GridLayout layout = new GridLayout(2, false);
- groupMR.setLayout(layout);
- data = new GridData();
- data.verticalAlignment = SWT.FILL;
- data.horizontalAlignment = SWT.CENTER;
- data.widthHint = 250;
- groupMR.setLayoutData(data);
-
- // Job Tracker host
- Label label = new Label(groupMR, SWT.NONE);
- label.setText("Host:");
- data =
- new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
- label.setLayoutData(data);
-
- textJTHost =
- createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_HOST);
- data = new GridData(GridData.FILL, GridData.CENTER, true, true);
- textJTHost.setLayoutData(data);
-
- // Job Tracker port
- label = new Label(groupMR, SWT.NONE);
- label.setText("Port:");
- data =
- new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
- label.setLayoutData(data);
-
- textJTPort =
- createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_PORT);
- data = new GridData(GridData.FILL, GridData.CENTER, true, true);
- textJTPort.setLayoutData(data);
- }
-
- /*
- * DFS group
- */
- {
- Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
- groupDFS.setText("DFS Master");
- groupDFS.setToolTipText("Address of the Distributed FileSystem "
- + "master node (the Name Node).");
- GridLayout layout = new GridLayout(2, false);
- groupDFS.setLayout(layout);
- data = new GridData();
- data.horizontalAlignment = SWT.CENTER;
- data.widthHint = 250;
- groupDFS.setLayoutData(data);
-
- colocateMasters =
- createConfCheckButton(this, groupDFS,
- ConfProp.PI_COLOCATE_MASTERS, "Use M/R Master host");
- data = new GridData();
- data.horizontalSpan = 2;
- colocateMasters.setLayoutData(data);
-
-        // Name Node host
- Label label = new Label(groupDFS, SWT.NONE);
- data = new GridData();
- label.setText("Host:");
- label.setLayoutData(data);
-
- textNNHost =
- createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
-
-        // Name Node port
- label = new Label(groupDFS, SWT.NONE);
- data = new GridData();
- label.setText("Port:");
- label.setLayoutData(data);
-
- textNNPort =
- createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
- }
-
- {
- Composite subpanel = new Composite(panel, SWT.FILL);
- subpanel.setLayout(new GridLayout(2, false));
- data = new GridData();
- data.horizontalSpan = 2;
- data.horizontalAlignment = SWT.FILL;
- subpanel.setLayoutData(data);
-
- userName =
- createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME,
- "&User name:");
- }
-
- // SOCKS proxy group
- {
- Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
- groupSOCKS.setText("SOCKS proxy");
- groupSOCKS.setToolTipText("Address of the SOCKS proxy to use "
- + "to connect to the infrastructure.");
- GridLayout layout = new GridLayout(2, false);
- groupSOCKS.setLayout(layout);
- data = new GridData();
- data.horizontalAlignment = SWT.CENTER;
- data.horizontalSpan = 2;
- data.widthHint = 250;
- groupSOCKS.setLayoutData(data);
-
- useSocksProxy =
- createConfCheckButton(this, groupSOCKS,
- ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
- data = new GridData();
- data.horizontalSpan = 2;
- useSocksProxy.setLayoutData(data);
-
- // SOCKS proxy host
- Label label = new Label(groupSOCKS, SWT.NONE);
- data = new GridData();
- label.setText("Host:");
- label.setLayoutData(data);
-
- socksProxyHost =
- createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
-
- // SOCKS proxy port
- label = new Label(groupSOCKS, SWT.NONE);
- data = new GridData();
- label.setText("Port:");
- label.setLayoutData(data);
-
- socksProxyPort =
- createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
- }
-
- // Update the state of all widgets according to the current values!
- reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
- reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
- reloadConfProp(ConfProp.PI_JOB_TRACKER_HOST);
-
- return panel;
- }
-
- /**
- * Reload the given configuration property value
- *
- * @param prop
- */
- private void reloadConfProp(ConfProp prop) {
- this.notifyChange(prop, location.getConfProp(prop));
- }
-
- public void notifyChange(ConfProp prop, String propValue) {
- switch (prop) {
- case PI_JOB_TRACKER_HOST: {
- textJTHost.setText(propValue);
- break;
- }
- case PI_JOB_TRACKER_PORT: {
- textJTPort.setText(propValue);
- break;
- }
- case PI_LOCATION_NAME: {
- locationName.setText(propValue);
- break;
- }
- case PI_USER_NAME: {
- userName.setText(propValue);
- break;
- }
- case PI_COLOCATE_MASTERS: {
- if (colocateMasters != null) {
- boolean colocate = propValue.equalsIgnoreCase("yes");
- colocateMasters.setSelection(colocate);
- if (textNNHost != null) {
- textNNHost.setEnabled(!colocate);
- }
- }
- break;
- }
- case PI_NAME_NODE_HOST: {
- textNNHost.setText(propValue);
- break;
- }
- case PI_NAME_NODE_PORT: {
- textNNPort.setText(propValue);
- break;
- }
- case PI_SOCKS_PROXY_ENABLE: {
- if (useSocksProxy != null) {
- boolean useProxy = propValue.equalsIgnoreCase("yes");
- useSocksProxy.setSelection(useProxy);
- if (socksProxyHost != null)
- socksProxyHost.setEnabled(useProxy);
- if (socksProxyPort != null)
- socksProxyPort.setEnabled(useProxy);
- }
- break;
- }
- case PI_SOCKS_PROXY_HOST: {
- socksProxyHost.setText(propValue);
- break;
- }
- case PI_SOCKS_PROXY_PORT: {
- socksProxyPort.setText(propValue);
- break;
- }
- }
- }
-
- /* @inheritDoc */
- public void modifyText(ModifyEvent e) {
- final Text text = (Text) e.widget;
- final ConfProp prop = (ConfProp) text.getData("hProp");
- Display.getDefault().syncExec(new Runnable() {
- public void run() {
- mediator.notifyChange(TabMain.this, prop, text.getText());
- }
- });
- }
-
- /* @inheritDoc */
- public void widgetDefaultSelected(SelectionEvent e) {
- this.widgetSelected(e);
- }
-
- /* @inheritDoc */
- public void widgetSelected(SelectionEvent e) {
- final Button button = (Button) e.widget;
- final ConfProp prop = (ConfProp) button.getData("hProp");
-
- Display.getDefault().syncExec(new Runnable() {
- public void run() {
- // We want to receive the update also!
- mediator.notifyChange(null, prop, button.getSelection() ? "yes"
- : "no");
- }
- });
- }
-
- }
-
- private class TabAdvanced implements TabListener, ModifyListener {
- TabMediator mediator;
-
- private Composite panel;
-
-    private Map<String, Text> textMap = new TreeMap<String, Text>();
-
- TabAdvanced(TabMediator mediator) {
- this.mediator = mediator;
- TabItem tab = new TabItem(mediator.folder, SWT.NONE);
- tab.setText("Advanced parameters");
- tab.setToolTipText("Access to advanced Hadoop parameters");
- tab.setImage(circle);
- tab.setControl(createControl(mediator.folder));
-
- }
-
- private Control createControl(Composite parent) {
- ScrolledComposite sc =
- new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL
- | SWT.V_SCROLL);
-
- panel = new Composite(sc, SWT.NONE);
- sc.setContent(panel);
-
- sc.setExpandHorizontal(true);
- sc.setExpandVertical(true);
-
- sc.setMinSize(640, 480);
-
- GridLayout layout = new GridLayout();
- layout.numColumns = 2;
- layout.makeColumnsEqualWidth = false;
- panel.setLayout(layout);
- panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true,
- true, 1, 1));
-
- // Sort by property name
- Configuration config = location.getConfiguration();
-      SortedMap<String, String> map = new TreeMap<String, String>();
-      Iterator<Entry<String, String>> it = config.iterator();
-      while (it.hasNext()) {
-        Entry<String, String> entry = it.next();
-        map.put(entry.getKey(), entry.getValue());
-      }
-
-      for (Entry<String, String> entry : map.entrySet()) {
- Text text = createConfNameEditor(this, panel, entry.getKey(), null);
- textMap.put(entry.getKey(), text);
- }
-
- sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
-
- return sc;
- }
-
- public void notifyChange(ConfProp prop, final String propValue) {
- Text text = textMap.get(prop.name);
- text.setText(propValue);
- }
-
- public void modifyText(ModifyEvent e) {
- final Text text = (Text) e.widget;
- Object hProp = text.getData("hProp");
- final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
- Object hPropName = text.getData("hPropName");
- final String propName =
- (hPropName != null) ? (String) hPropName : null;
-
- Display.getDefault().syncExec(new Runnable() {
- public void run() {
- if (prop != null)
- mediator.notifyChange(TabAdvanced.this, prop, text.getText());
- else
- mediator
- .notifyChange(TabAdvanced.this, propName, text.getText());
- }
- });
- }
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java
deleted file mode 100644
index 17f4acecf40..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.servers;
-
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.eclipse.jface.viewers.IContentProvider;
-import org.eclipse.jface.viewers.ILabelProviderListener;
-import org.eclipse.jface.viewers.IStructuredContentProvider;
-import org.eclipse.jface.viewers.ITableLabelProvider;
-import org.eclipse.jface.viewers.Viewer;
-import org.eclipse.swt.graphics.Image;
-
-/**
- * Provider that enables selection of a predefined Hadoop server.
- */
-
-public class HadoopServerSelectionListContentProvider implements
- IContentProvider, ITableLabelProvider, IStructuredContentProvider {
- public void dispose() {
-
- }
-
- public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
-
- }
-
- public Image getColumnImage(Object element, int columnIndex) {
- return null;
- }
-
- public String getColumnText(Object element, int columnIndex) {
- if (element instanceof HadoopServer) {
- HadoopServer location = (HadoopServer) element;
- if (columnIndex == 0) {
- return location.getLocationName();
-
- } else if (columnIndex == 1) {
- return location.getMasterHostName();
- }
- }
-
- return element.toString();
- }
-
- public void addListener(ILabelProviderListener listener) {
-
- }
-
- public boolean isLabelProperty(Object element, String property) {
- return false;
- }
-
- public void removeListener(ILabelProviderListener listener) {
-
- }
-
- public Object[] getElements(Object inputElement) {
- return ServerRegistry.getInstance().getServers().toArray();
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java
deleted file mode 100644
index 3c65173f9f3..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.servers;
-
-import org.apache.hadoop.eclipse.server.HadoopServer;
-
-/**
- * Interface for monitoring server changes
- */
-public interface IHadoopServerListener {
- void serverChanged(HadoopServer location, int type);
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
deleted file mode 100644
index 2df29e9c16a..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
+++ /dev/null
@@ -1,383 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.servers;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.eclipse.Activator;
-import org.apache.hadoop.eclipse.ErrorMessageDialog;
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.apache.hadoop.eclipse.server.JarModule;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.mapred.JobConf;
-import org.eclipse.core.resources.IFile;
-import org.eclipse.core.runtime.CoreException;
-import org.eclipse.core.runtime.IPath;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.Path;
-import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
-import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
-import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
-import org.eclipse.jdt.launching.JavaRuntime;
-import org.eclipse.jface.viewers.TableViewer;
-import org.eclipse.jface.wizard.Wizard;
-import org.eclipse.jface.wizard.WizardPage;
-import org.eclipse.swt.SWT;
-import org.eclipse.swt.events.SelectionEvent;
-import org.eclipse.swt.events.SelectionListener;
-import org.eclipse.swt.layout.FillLayout;
-import org.eclipse.swt.layout.GridData;
-import org.eclipse.swt.layout.GridLayout;
-import org.eclipse.swt.widgets.Button;
-import org.eclipse.swt.widgets.Composite;
-import org.eclipse.swt.widgets.Label;
-import org.eclipse.swt.widgets.Table;
-import org.eclipse.swt.widgets.TableColumn;
-import org.eclipse.swt.widgets.Text;
-
-/**
- * Wizard for publishing a job to a Hadoop server.
- */
-
-public class RunOnHadoopWizard extends Wizard {
-
- private MainWizardPage mainPage;
-
- private HadoopLocationWizard createNewPage;
-
- /**
- * The file resource (containing a main()) to run on the Hadoop location
- */
- private IFile resource;
-
- /**
- * The launch configuration to update
- */
- private ILaunchConfigurationWorkingCopy iConf;
-
- private IProgressMonitor progressMonitor;
-
- public RunOnHadoopWizard(IFile resource,
- ILaunchConfigurationWorkingCopy iConf) {
- this.resource = resource;
- this.iConf = iConf;
- setForcePreviousAndNextButtons(true);
- setNeedsProgressMonitor(true);
- setWindowTitle("Run on Hadoop");
- }
-
- /**
-   * This wizard contains two pages:
-   * - the first one lets the user choose an already existing location
-   * - the second one allows the user to create a new location, in case it
-   *   does not already exist
- */
- /* @inheritDoc */
- @Override
- public void addPages() {
- addPage(this.mainPage = new MainWizardPage());
- addPage(this.createNewPage = new HadoopLocationWizard());
- }
-
- /**
- * Performs any actions appropriate in response to the user having pressed
-   * the Finish button, or refuses if finishing now is not permitted.
- */
- /* @inheritDoc */
- @Override
- public boolean performFinish() {
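-    // Overall flow: resolve the target location (new or existing), package
-    // the job as a JAR, write a core-site.xml for that location into a
-    // temporary conf directory, and prepend that directory to the launch
-    // classpath so the job picks up the right configuration.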
-
- /*
- * Create a new location or get an existing one
- */
- HadoopServer location = null;
- if (mainPage.createNew.getSelection()) {
- location = createNewPage.performFinish();
-
- } else if (mainPage.table.getSelection().length == 1) {
- location = (HadoopServer) mainPage.table.getSelection()[0].getData();
- }
-
- if (location == null)
- return false;
-
- /*
- * Get the base directory of the plug-in for storing configurations and
- * JARs
- */
- File baseDir = Activator.getDefault().getStateLocation().toFile();
-
- // Package the Job into a JAR
- File jarFile = JarModule.createJarPackage(resource);
- if (jarFile == null) {
- ErrorMessageDialog.display("Run on Hadoop",
- "Unable to create or locate the JAR file for the Job");
- return false;
- }
-
- /*
- * Generate a temporary Hadoop configuration directory and add it to the
- * classpath of the launch configuration
- */
-
- File confDir;
- try {
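-      // createTempFile() only reserves a unique name; delete the empty file
-      // and recreate it as a directory to hold the generated configuration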
- confDir = File.createTempFile("hadoop-conf-", "", baseDir);
- confDir.delete();
- confDir.mkdirs();
- if (!confDir.isDirectory()) {
- ErrorMessageDialog.display("Run on Hadoop",
- "Cannot create temporary directory: " + confDir);
- return false;
- }
- } catch (IOException ioe) {
- ioe.printStackTrace();
- return false;
- }
-
- // Prepare the Hadoop configuration
- JobConf conf = new JobConf(location.getConfiguration());
- conf.setJar(jarFile.getAbsolutePath());
-
- // Write it to the disk file
- try {
- // File confFile = File.createTempFile("core-site-", ".xml",
- // confDir);
- File confFile = new File(confDir, "core-site.xml");
- FileOutputStream fos = new FileOutputStream(confFile);
- try {
- conf.writeXml(fos);
- fos.close();
- fos = null;
- } finally {
- IOUtils.closeStream(fos);
- }
-
- } catch (IOException ioe) {
- ioe.printStackTrace();
- return false;
- }
-
- // Setup the Launch class path
- List classPath;
- try {
- classPath =
- iConf.getAttribute(
- IJavaLaunchConfigurationConstants.ATTR_CLASSPATH,
- new ArrayList());
- IPath confIPath = new Path(confDir.getAbsolutePath());
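-      // Prepend the conf directory so the generated core-site.xml is found
-      // before any other configuration on the classpath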
- IRuntimeClasspathEntry cpEntry =
- JavaRuntime.newArchiveRuntimeClasspathEntry(confIPath);
- classPath.add(0, cpEntry.getMemento());
- iConf.setAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH,
- classPath);
- iConf.setAttribute(IJavaLaunchConfigurationConstants.ATTR_PROGRAM_ARGUMENTS, mainPage.argumentsText.getText());
-
- } catch (CoreException e) {
- e.printStackTrace();
- return false;
- }
-
- // location.runResource(resource, progressMonitor);
- return true;
- }
-
- private void refreshButtons() {
- getContainer().updateButtons();
- }
-
- /**
-   * Allows finishing when an existing server is selected or when a new
-   * server location is defined
- */
- /* @inheritDoc */
- @Override
- public boolean canFinish() {
- if (mainPage != null)
- return mainPage.canFinish();
- return false;
- }
-
- /**
- * This is the main page of the wizard. It allows the user either to choose
-   * an already existing location or to indicate that a new location should
-   * be created.
- */
- public class MainWizardPage extends WizardPage {
-
- private Button createNew;
-
- private Table table;
- private Text argumentsText;
-
- private Button chooseExisting;
-
- public MainWizardPage() {
- super("Select or define server to run on");
- setTitle("Select Hadoop location");
- setDescription("Select a Hadoop location to run on.");
- }
-
- /* @inheritDoc */
- @Override
- public boolean canFlipToNextPage() {
- return createNew.getSelection();
- }
-
- /* @inheritDoc */
- public void createControl(Composite parent) {
- Composite panel = new Composite(parent, SWT.NONE);
- panel.setLayout(new GridLayout(1, false));
-
- // Label
- Label label = new Label(panel, SWT.NONE);
- label.setText("Select a Hadoop Server to run on.");
- GridData gData = new GridData(GridData.FILL_BOTH);
- gData.grabExcessVerticalSpace = false;
- label.setLayoutData(gData);
-
- // Create location button
- createNew = new Button(panel, SWT.RADIO);
- createNew.setText("Define a new Hadoop server location");
- createNew.setLayoutData(gData);
- createNew.addSelectionListener(new SelectionListener() {
- public void widgetDefaultSelected(SelectionEvent e) {
- }
-
- public void widgetSelected(SelectionEvent e) {
- setPageComplete(true);
- RunOnHadoopWizard.this.refreshButtons();
- }
- });
- createNew.setSelection(true);
-
- // Select existing location button
- chooseExisting = new Button(panel, SWT.RADIO);
- chooseExisting
- .setText("Choose an existing server from the list below");
- chooseExisting.setLayoutData(gData);
- chooseExisting.addSelectionListener(new SelectionListener() {
- public void widgetDefaultSelected(SelectionEvent e) {
- }
-
- public void widgetSelected(SelectionEvent e) {
- if (chooseExisting.getSelection()
- && (table.getSelectionCount() == 0)) {
- if (table.getItems().length > 0) {
- table.setSelection(0);
- }
- }
- RunOnHadoopWizard.this.refreshButtons();
- }
- });
-
- // Table of existing locations
- Composite serverListPanel = new Composite(panel, SWT.FILL);
- gData = new GridData(GridData.FILL_BOTH);
- gData.horizontalSpan = 1;
- serverListPanel.setLayoutData(gData);
-
- FillLayout layout = new FillLayout();
- layout.marginHeight = layout.marginWidth = 12;
- serverListPanel.setLayout(layout);
-
- table =
- new Table(serverListPanel, SWT.BORDER | SWT.H_SCROLL
- | SWT.V_SCROLL | SWT.FULL_SELECTION);
- table.setHeaderVisible(true);
- table.setLinesVisible(true);
-
- TableColumn nameColumn = new TableColumn(table, SWT.LEFT);
- nameColumn.setText("Location");
- nameColumn.setWidth(450);
-
- TableColumn hostColumn = new TableColumn(table, SWT.LEFT);
- hostColumn.setText("Master host name");
- hostColumn.setWidth(250);
-
-      // If the user selects one entry, switch to "chooseExisting"
- table.addSelectionListener(new SelectionListener() {
- public void widgetDefaultSelected(SelectionEvent e) {
- }
-
- public void widgetSelected(SelectionEvent e) {
- chooseExisting.setSelection(true);
- createNew.setSelection(false);
- setPageComplete(table.getSelectionCount() == 1);
- RunOnHadoopWizard.this.refreshButtons();
- }
- });
-
- // Label
- Label argumentsLabel = new Label(panel, SWT.NONE);
- argumentsLabel.setText("Arguments:");
- GridData gDataArgumentsLabel = new GridData(GridData.FILL_BOTH);
- gDataArgumentsLabel.grabExcessVerticalSpace = false;
- argumentsLabel.setLayoutData(gDataArgumentsLabel);
-
- // Textbox
- argumentsText = new Text(panel, SWT.NONE);
- try {
- argumentsText.setText(iConf.getAttribute(IJavaLaunchConfigurationConstants.ATTR_PROGRAM_ARGUMENTS, ""));
- } catch (CoreException e1) {
- e1.printStackTrace();
- }
- GridData gDataArgumentsText = new GridData(GridData.FILL_BOTH);
- gDataArgumentsText.grabExcessVerticalSpace = false;
- argumentsText.setLayoutData(gDataArgumentsText);
-
-
- TableViewer viewer = new TableViewer(table);
- HadoopServerSelectionListContentProvider provider =
- new HadoopServerSelectionListContentProvider();
- viewer.setContentProvider(provider);
- viewer.setLabelProvider(provider);
- viewer.setInput(new Object());
- // don't care, get from singleton server registry
-
- this.setControl(panel);
- }
-
- /**
- * Returns whether this page state allows the Wizard to finish or not
- *
- * @return can the wizard finish or not?
- */
- public boolean canFinish() {
- if (!isControlCreated())
- return false;
-
- if (this.createNew.getSelection())
- return getNextPage().isPageComplete();
-
- return this.chooseExisting.getSelection();
- }
- }
-
- /**
- * @param progressMonitor
- */
- public void setProgressMonitor(IProgressMonitor progressMonitor) {
- this.progressMonitor = progressMonitor;
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java
deleted file mode 100644
index 30a37cd439f..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.servers;
-
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.eclipse.Activator;
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.eclipse.jface.dialogs.MessageDialog;
-
-/**
- * Register of Hadoop locations.
- *
- * Each location corresponds to a Hadoop {@link Configuration} stored as an
- * XML file in the workspace plug-in configuration directory:
- *
- *
- * <workspace-dir>/.metadata/.plugins/org.apache.hadoop.eclipse/locations/*.xml
- *
- *
- */
-public class ServerRegistry {
-
- private static final ServerRegistry INSTANCE = new ServerRegistry();
-
- public static final int SERVER_ADDED = 0;
-
- public static final int SERVER_REMOVED = 1;
-
- public static final int SERVER_STATE_CHANGED = 2;
-
- private final File baseDir =
- Activator.getDefault().getStateLocation().toFile();
-
- private final File saveDir = new File(baseDir, "locations");
-
- private ServerRegistry() {
- if (saveDir.exists() && !saveDir.isDirectory())
- saveDir.delete();
- if (!saveDir.exists())
- saveDir.mkdirs();
-
- load();
- }
-
-  private Map<String, HadoopServer> servers;
-
-  private Set<IHadoopServerListener> listeners =
-      new HashSet<IHadoopServerListener>();
-
- public static ServerRegistry getInstance() {
- return INSTANCE;
- }
-
-  public synchronized Collection<HadoopServer> getServers() {
- return Collections.unmodifiableCollection(servers.values());
- }
-
- /**
- * Load all available locations from the workspace configuration directory.
- */
- private synchronized void load() {
-    Map<String, HadoopServer> map = new TreeMap<String, HadoopServer>();
- for (File file : saveDir.listFiles()) {
- try {
- HadoopServer server = new HadoopServer(file);
- map.put(server.getLocationName(), server);
-
- } catch (Exception exn) {
- System.err.println(exn);
- }
- }
- this.servers = map;
- }
-
- private synchronized void store() {
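-    // Save strategy: write every location to a fresh temporary directory,
-    // move the current save directory aside as "locations.backup", then
-    // rename the temporary directory into place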
- try {
- File dir = File.createTempFile("locations", "new", baseDir);
- dir.delete();
- dir.mkdirs();
-
- for (HadoopServer server : servers.values()) {
- server.storeSettingsToFile(new File(dir, server.getLocationName()
- + ".xml"));
- }
-
- FilenameFilter XMLFilter = new FilenameFilter() {
- public boolean accept(File dir, String name) {
- String lower = name.toLowerCase();
- return lower.endsWith(".xml");
- }
- };
-
- File backup = new File(baseDir, "locations.backup");
- if (backup.exists()) {
- for (File file : backup.listFiles(XMLFilter))
- if (!file.delete())
- throw new IOException("Unable to delete backup location file: "
- + file);
- if (!backup.delete())
- throw new IOException(
- "Unable to delete backup location directory: " + backup);
- }
-
- saveDir.renameTo(backup);
- dir.renameTo(saveDir);
-
- } catch (IOException ioe) {
- ioe.printStackTrace();
- MessageDialog.openError(null,
- "Saving configuration of Hadoop locations failed", ioe.toString());
- }
- }
-
- public void dispose() {
- for (HadoopServer server : getServers()) {
- server.dispose();
- }
- }
-
- public synchronized HadoopServer getServer(String location) {
- return servers.get(location);
- }
-
- /*
- * HadoopServer map listeners
- */
-
- public void addListener(IHadoopServerListener l) {
- synchronized (listeners) {
- listeners.add(l);
- }
- }
-
- public void removeListener(IHadoopServerListener l) {
- synchronized (listeners) {
- listeners.remove(l);
- }
- }
-
- private void fireListeners(HadoopServer location, int kind) {
- synchronized (listeners) {
- for (IHadoopServerListener listener : listeners) {
- listener.serverChanged(location, kind);
- }
- }
- }
-
- public synchronized void removeServer(HadoopServer server) {
- this.servers.remove(server.getLocationName());
- store();
- fireListeners(server, SERVER_REMOVED);
- }
-
- public synchronized void addServer(HadoopServer server) {
- this.servers.put(server.getLocationName(), server);
- store();
- fireListeners(server, SERVER_ADDED);
- }
-
- /**
- * Update one Hadoop location
- *
- * @param originalName the original location name (might have changed)
- * @param server the location
- */
- public synchronized void updateServer(String originalName,
- HadoopServer server) {
-
- // Update the map if the location name has changed
- if (!server.getLocationName().equals(originalName)) {
- servers.remove(originalName);
- servers.put(server.getLocationName(), server);
- }
- store();
- fireListeners(server, SERVER_STATE_CHANGED);
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java b/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java
deleted file mode 100644
index 389d92e06ef..00000000000
--- a/hadoop-mapreduce-project/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java
+++ /dev/null
@@ -1,460 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.eclipse.view.servers;
-
-import java.util.Collection;
-
-import org.apache.hadoop.eclipse.ImageLibrary;
-import org.apache.hadoop.eclipse.actions.EditLocationAction;
-import org.apache.hadoop.eclipse.actions.NewLocationAction;
-import org.apache.hadoop.eclipse.server.HadoopJob;
-import org.apache.hadoop.eclipse.server.HadoopServer;
-import org.apache.hadoop.eclipse.server.IJobListener;
-import org.apache.hadoop.eclipse.server.JarModule;
-import org.apache.hadoop.eclipse.servers.IHadoopServerListener;
-import org.apache.hadoop.eclipse.servers.ServerRegistry;
-import org.eclipse.jface.action.Action;
-import org.eclipse.jface.action.IAction;
-import org.eclipse.jface.action.IMenuListener;
-import org.eclipse.jface.action.IMenuManager;
-import org.eclipse.jface.action.MenuManager;
-import org.eclipse.jface.dialogs.MessageDialog;
-import org.eclipse.jface.viewers.ILabelProviderListener;
-import org.eclipse.jface.viewers.ISelection;
-import org.eclipse.jface.viewers.ISelectionChangedListener;
-import org.eclipse.jface.viewers.IStructuredSelection;
-import org.eclipse.jface.viewers.ITableLabelProvider;
-import org.eclipse.jface.viewers.ITreeContentProvider;
-import org.eclipse.jface.viewers.ITreeSelection;
-import org.eclipse.jface.viewers.SelectionChangedEvent;
-import org.eclipse.jface.viewers.TreeViewer;
-import org.eclipse.jface.viewers.Viewer;
-import org.eclipse.swt.SWT;
-import org.eclipse.swt.graphics.Image;
-import org.eclipse.swt.layout.GridData;
-import org.eclipse.swt.widgets.Composite;
-import org.eclipse.swt.widgets.Display;
-import org.eclipse.swt.widgets.Menu;
-import org.eclipse.swt.widgets.Tree;
-import org.eclipse.swt.widgets.TreeColumn;
-import org.eclipse.ui.IViewSite;
-import org.eclipse.ui.PartInitException;
-import org.eclipse.ui.actions.ActionFactory;
-import org.eclipse.ui.part.ViewPart;
-
-/**
- * Map/Reduce locations view: displays all available Hadoop locations and the
- * Jobs running/finished on these locations
- */
-public class ServerView extends ViewPart implements ITreeContentProvider,
- ITableLabelProvider, IJobListener, IHadoopServerListener {
-
- /**
- * Deletion action: delete a Hadoop location, kill a running job or remove
- * a finished job entry
- */
- class DeleteAction extends Action {
-
- DeleteAction() {
- setText("Delete");
- setImageDescriptor(ImageLibrary.get("server.view.action.delete"));
- }
-
- /* @inheritDoc */
- @Override
- public void run() {
- ISelection selection =
- getViewSite().getSelectionProvider().getSelection();
- if ((selection != null) && (selection instanceof IStructuredSelection)) {
- Object selItem =
- ((IStructuredSelection) selection).getFirstElement();
-
- if (selItem instanceof HadoopServer) {
- HadoopServer location = (HadoopServer) selItem;
- if (MessageDialog.openConfirm(Display.getDefault()
- .getActiveShell(), "Confirm delete Hadoop location",
- "Do you really want to remove the Hadoop location: "
- + location.getLocationName())) {
- ServerRegistry.getInstance().removeServer(location);
- }
-
- } else if (selItem instanceof HadoopJob) {
-
- // kill the job
- HadoopJob job = (HadoopJob) selItem;
- if (job.isCompleted()) {
- // Job already finished, remove the entry
- job.getLocation().purgeJob(job);
-
- } else {
- // Job is running, kill the job?
- if (MessageDialog.openConfirm(Display.getDefault()
- .getActiveShell(), "Confirm kill running Job",
- "Do you really want to kill running Job: " + job.getJobID())) {
- job.kill();
- }
- }
- }
- }
- }
- }
-
- /**
- * This object is the root content for this content provider
- */
- private static final Object CONTENT_ROOT = new Object();
-
- private final IAction deleteAction = new DeleteAction();
-
- private final IAction editServerAction = new EditLocationAction(this);
-
- private final IAction newLocationAction = new NewLocationAction();
-
- private TreeViewer viewer;
-
- public ServerView() {
- }
-
- /* @inheritDoc */
- @Override
- public void init(IViewSite site) throws PartInitException {
- super.init(site);
- }
-
- /* @inheritDoc */
- @Override
- public void dispose() {
- ServerRegistry.getInstance().removeListener(this);
- }
-
- /**
- * Creates the columns for the view
- */
- @Override
- public void createPartControl(Composite parent) {
- Tree main =
- new Tree(parent, SWT.SINGLE | SWT.FULL_SELECTION | SWT.H_SCROLL
- | SWT.V_SCROLL);
- main.setHeaderVisible(true);
- main.setLinesVisible(false);
- main.setLayoutData(new GridData(GridData.FILL_BOTH));
-
- TreeColumn serverCol = new TreeColumn(main, SWT.SINGLE);
- serverCol.setText("Location");
- serverCol.setWidth(300);
- serverCol.setResizable(true);
-
- TreeColumn locationCol = new TreeColumn(main, SWT.SINGLE);
- locationCol.setText("Master node");
- locationCol.setWidth(185);
- locationCol.setResizable(true);
-
- TreeColumn stateCol = new TreeColumn(main, SWT.SINGLE);
- stateCol.setText("State");
- stateCol.setWidth(95);
- stateCol.setResizable(true);
-
- TreeColumn statusCol = new TreeColumn(main, SWT.SINGLE);
- statusCol.setText("Status");
- statusCol.setWidth(300);
- statusCol.setResizable(true);
-
- viewer = new TreeViewer(main);
- viewer.setContentProvider(this);
- viewer.setLabelProvider(this);
- viewer.setInput(CONTENT_ROOT); // don't care
-
- getViewSite().setSelectionProvider(viewer);
-
- getViewSite().getActionBars().setGlobalActionHandler(
- ActionFactory.DELETE.getId(), deleteAction);
- getViewSite().getActionBars().getToolBarManager().add(editServerAction);
- getViewSite().getActionBars().getToolBarManager().add(newLocationAction);
-
- createActions();
- createContextMenu();
- }
-
- /**
- * Actions
- */
- private void createActions() {
- /*
- * addItemAction = new Action("Add...") { public void run() { addItem(); } };
- * addItemAction.setImageDescriptor(ImageLibrary
- * .get("server.view.location.new"));
- */
- /*
- * deleteItemAction = new Action("Delete") { public void run() {
- * deleteItem(); } };
- * deleteItemAction.setImageDescriptor(getImageDescriptor("delete.gif"));
- *
- * selectAllAction = new Action("Select All") { public void run() {
- * selectAll(); } };
- */
- // Add selection listener.
- viewer.addSelectionChangedListener(new ISelectionChangedListener() {
- public void selectionChanged(SelectionChangedEvent event) {
- updateActionEnablement();
- }
- });
- }
-
- private void addItem() {
- System.out.printf("ADD ITEM\n");
- }
-
- private void updateActionEnablement() {
- IStructuredSelection sel = (IStructuredSelection) viewer.getSelection();
- // deleteItemAction.setEnabled(sel.size() > 0);
- }
-
- /**
- * Contextual menu
- */
- private void createContextMenu() {
- // Create menu manager.
- MenuManager menuMgr = new MenuManager();
- menuMgr.setRemoveAllWhenShown(true);
- menuMgr.addMenuListener(new IMenuListener() {
- public void menuAboutToShow(IMenuManager mgr) {
- fillContextMenu(mgr);
- }
- });
-
- // Create menu.
- Menu menu = menuMgr.createContextMenu(viewer.getControl());
- viewer.getControl().setMenu(menu);
-
- // Register menu for extension.
- getSite().registerContextMenu(menuMgr, viewer);
- }
-
- private void fillContextMenu(IMenuManager mgr) {
- mgr.add(newLocationAction);
- mgr.add(editServerAction);
- mgr.add(deleteAction);
- /*
- * mgr.add(new GroupMarker(IWorkbenchActionConstants.MB_ADDITIONS));
- * mgr.add(deleteItemAction); mgr.add(new Separator());
- * mgr.add(selectAllAction);
- */
- }
-
- /* @inheritDoc */
- @Override
- public void setFocus() {
-
- }
-
- /*
- * IHadoopServerListener implementation
- */
-
- /* @inheritDoc */
- public void serverChanged(HadoopServer location, int type) {
- Display.getDefault().syncExec(new Runnable() {
- public void run() {
- ServerView.this.viewer.refresh();
- }
- });
- }
-
- /*
- * IStructuredContentProvider implementation
- */
-
- /* @inheritDoc */
- public void inputChanged(final Viewer viewer, Object oldInput,
- Object newInput) {
- if (oldInput == CONTENT_ROOT)
- ServerRegistry.getInstance().removeListener(this);
- if (newInput == CONTENT_ROOT)
- ServerRegistry.getInstance().addListener(this);
- }
-
- /**
- * The root elements displayed by this view are the existing Hadoop
- * locations
- */
- /* @inheritDoc */
- public Object[] getElements(Object inputElement) {
- return ServerRegistry.getInstance().getServers().toArray();
- }
-
- /*
- * ITreeStructuredContentProvider implementation
- */
-
- /**
- * Each location contains a child entry for each job it runs.
- */
- /* @inheritDoc */
- public Object[] getChildren(Object parent) {
-
- if (parent instanceof HadoopServer) {
- HadoopServer location = (HadoopServer) parent;
- location.addJobListener(this);
- Collection jobs = location.getJobs();
- return jobs.toArray();
- }
-
- return null;
- }
-
- /* @inheritDoc */
- public Object getParent(Object element) {
- if (element instanceof HadoopServer) {
- return CONTENT_ROOT;
-
- } else if (element instanceof HadoopJob) {
- return ((HadoopJob) element).getLocation();
- }
-
- return null;
- }
-
- /* @inheritDoc */
- public boolean hasChildren(Object element) {
- /* Only server entries have children */
- return (element instanceof HadoopServer);
- }
-
- /*
- * ITableLabelProvider implementation
- */
-
- /* @inheritDoc */
- public void addListener(ILabelProviderListener listener) {
- // no listeners handling
- }
-
- public boolean isLabelProperty(Object element, String property) {
- return false;
- }
-
- /* @inheritDoc */
- public void removeListener(ILabelProviderListener listener) {
- // no listener handling
- }
-
- /* @inheritDoc */
- public Image getColumnImage(Object element, int columnIndex) {
- if ((columnIndex == 0) && (element instanceof HadoopServer)) {
- return ImageLibrary.getImage("server.view.location.entry");
-
- } else if ((columnIndex == 0) && (element instanceof HadoopJob)) {
- return ImageLibrary.getImage("server.view.job.entry");
- }
- return null;
- }
-
- /* @inheritDoc */
- public String getColumnText(Object element, int columnIndex) {
- if (element instanceof HadoopServer) {
- HadoopServer server = (HadoopServer) element;
-
- switch (columnIndex) {
- case 0:
- return server.getLocationName();
- case 1:
- return server.getMasterHostName().toString();
- case 2:
- return server.getState();
- case 3:
- return "";
- }
- } else if (element instanceof HadoopJob) {
- HadoopJob job = (HadoopJob) element;
-
- switch (columnIndex) {
- case 0:
- return job.getJobID().toString();
- case 1:
- return "";
- case 2:
- return job.getState().toString();
- case 3:
- return job.getStatus();
- }
- } else if (element instanceof JarModule) {
- JarModule jar = (JarModule) element;
-
- switch (columnIndex) {
- case 0:
- return jar.toString();
- case 1:
- return "Publishing jar to server..";
- case 2:
- return "";
- }
- }
-
- return null;
- }
-
- /*
- * IJobListener (Map/Reduce Jobs listener) implementation
- */
-
- /* @inheritDoc */
- public void jobAdded(HadoopJob job) {
- viewer.refresh();
- }
-
- /* @inheritDoc */
- public void jobRemoved(HadoopJob job) {
- viewer.refresh();
- }
-
- /* @inheritDoc */
- public void jobChanged(HadoopJob job) {
- viewer.refresh(job);
- }
-
- /* @inheritDoc */
- public void publishDone(JarModule jar) {
- viewer.refresh();
- }
-
- /* @inheritDoc */
- public void publishStart(JarModule jar) {
- viewer.refresh();
- }
-
- /*
- * Miscellaneous
- */
-
- /**
- * Return the currently selected server (null if there is no selection or
- * if the selection is not a server)
- *
- * @return the currently selected server entry
- */
- public HadoopServer getSelectedServer() {
- ITreeSelection selection = (ITreeSelection) viewer.getSelection();
- Object first = selection.getFirstElement();
- if (first instanceof HadoopServer) {
- return (HadoopServer) first;
- }
- return null;
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/README b/hadoop-mapreduce-project/src/contrib/gridmix/README
deleted file mode 100644
index 3ca1cf1badc..00000000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/README
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-This project implements the third version of Gridmix, a benchmark for live
-clusters. Given a description of jobs (a "trace") annotated with information
-about I/O, memory, etc., a synthetic mix of jobs will be generated and submitted
-to the cluster.
-
-Documentation of usage and configuration properties in forrest is available in
-src/docs/src/documentation/content/xdocs/gridmix.xml
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/build.xml b/hadoop-mapreduce-project/src/contrib/gridmix/build.xml
deleted file mode 100644
index 72740e35cba..00000000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/build.xml
+++ /dev/null
@@ -1,32 +0,0 @@
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/ivy.xml b/hadoop-mapreduce-project/src/contrib/gridmix/ivy.xml
deleted file mode 100644
index d587a7b8752..00000000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/ivy.xml
+++ /dev/null
@@ -1,153 +0,0 @@
- Rumen
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/ivy/libraries.properties b/hadoop-mapreduce-project/src/contrib/gridmix/ivy/libraries.properties
deleted file mode 100644
index 5cc2be0253f..00000000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/ivy/libraries.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#This properties file lists the versions of the various artifacts used by gridmix.
-#It drives ivy and the generation of a maven POM
-
-#Please list the dependencies name with version if they are different from the ones
-#listed in the global libraries.properties file (in alphabetical order)
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestDistCacheEmulation.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestDistCacheEmulation.java
deleted file mode 100644
index 0f1c7e2f712..00000000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestDistCacheEmulation.java
+++ /dev/null
@@ -1,514 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import static org.junit.Assert.*;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.MapContext;
-import org.apache.hadoop.mapreduce.MapReduceTestUtil;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.hadoop.mapreduce.task.MapContextImpl;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Validate emulation of distributed cache load in gridmix simulated jobs.
- *
- */
-public class TestDistCacheEmulation {
-
- private DistributedCacheEmulator dce = null;
-
- @BeforeClass
- public static void init() throws IOException {
- GridmixTestUtils.initCluster();
- }
-
- @AfterClass
- public static void shutDown() throws IOException {
- GridmixTestUtils.shutdownCluster();
- }
-
- /**
- * Validate the dist cache files generated by GenerateDistCacheData job.
- * @param jobConf configuration of GenerateDistCacheData job.
- * @param sortedFileSizes array of sorted distributed cache file sizes
- * @throws IOException
- * @throws FileNotFoundException
- */
- private void validateDistCacheData(JobConf jobConf, long[] sortedFileSizes)
- throws FileNotFoundException, IOException {
- Path distCachePath = dce.getDistributedCacheDir();
- String filesListFile =
- jobConf.get(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_LIST);
- FileSystem fs = FileSystem.get(jobConf);
-
- // Validate the existence of Distributed Cache files list file directly
- // under distributed cache directory
- Path listFile = new Path(filesListFile);
- assertTrue("Path of Distributed Cache files list file is wrong.",
- distCachePath.equals(listFile.getParent().makeQualified(fs)));
-
- // Delete the dist cache files list file
- assertTrue("Failed to delete distributed Cache files list file " + listFile,
- fs.delete(listFile));
-
- List<Long> fileSizes = new ArrayList<Long>();
- for (long size : sortedFileSizes) {
- fileSizes.add(size);
- }
- // validate dist cache files after deleting the 'files list file'
- validateDistCacheFiles(fileSizes, distCachePath);
- }
-
- /**
- * Validate private/public distributed cache files.
- * @param filesSizesExpected list of sizes of expected dist cache files
- * @param distCacheDir the distributed cache dir to be validated
- * @throws IOException
- * @throws FileNotFoundException
- */
- private void validateDistCacheFiles(List<Long> filesSizesExpected,
- Path distCacheDir) throws FileNotFoundException, IOException {
- RemoteIterator<LocatedFileStatus> iter =
- GridmixTestUtils.dfs.listFiles(distCacheDir, false);
- int numFiles = filesSizesExpected.size();
- for (int i = 0; i < numFiles; i++) {
- assertTrue("Missing distributed cache files.", iter.hasNext());
- LocatedFileStatus stat = iter.next();
- assertTrue("File size of distributed cache file "
- + stat.getPath().toUri().getPath() + " is wrong.",
- filesSizesExpected.remove(stat.getLen()));
-
- FsPermission perm = stat.getPermission();
- assertEquals("Wrong permissions for distributed cache file "
- + stat.getPath().toUri().getPath(),
- new FsPermission((short)0644), perm);
- }
- assertFalse("Number of files under distributed cache dir is wrong.",
- iter.hasNext());
- }
-
- /**
- * Configures 5 HDFS-based dist cache files and 1 local-FS-based dist cache
- * file in the given Configuration object conf.
- * @param conf configuration where dist cache config properties are to be set
- * @param useOldProperties true if old config properties are to be set
- * @return array of sorted HDFS-based distributed cache file sizes
- * @throws IOException
- */
- private long[] configureDummyDistCacheFiles(Configuration conf,
- boolean useOldProperties) throws IOException {
- String user = UserGroupInformation.getCurrentUser().getShortUserName();
- conf.set(MRJobConfig.USER_NAME, user);
- conf.set("mapreduce.job.hdfs-servers", "");
- // Set some dummy dist cache files in gridmix configuration so that they go
- // into the configuration of JobStory objects.
- String[] distCacheFiles = {"hdfs:///tmp/file1.txt",
- "/tmp/" + user + "/.staging/job_1/file2.txt",
- "hdfs:///user/user1/file3.txt",
- "/home/user2/file4.txt",
- "subdir1/file5.txt",
- "subdir2/file6.gz"};
- String[] fileSizes = {"400", "2500", "700", "1200", "1500", "500"};
-
- String[] visibilities = {"true", "false", "false", "true", "true", "false"};
- String[] timeStamps = {"1234", "2345", "34567", "5434", "125", "134"};
- if (useOldProperties) {
- conf.setStrings("mapred.cache.files", distCacheFiles);
- conf.setStrings("mapred.cache.files.filesizes", fileSizes);
- conf.setStrings("mapred.cache.files.visibilities", visibilities);
- conf.setStrings("mapred.cache.files.timestamps", timeStamps);
- } else {
- conf.setStrings(MRJobConfig.CACHE_FILES, distCacheFiles);
- conf.setStrings(MRJobConfig.CACHE_FILES_SIZES, fileSizes);
- conf.setStrings(MRJobConfig.CACHE_FILE_VISIBILITIES, visibilities);
- conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timeStamps);
- }
- // local FS based dist cache file whose path contains /.staging is
- // not created on HDFS. So file size 2500 is not added to sortedFileSizes.
- long[] sortedFileSizes = new long[] {1500, 1200, 700, 500, 400};
- return sortedFileSizes;
- }
-
- /**
- * Runs setupGenerateDistCacheData() on a new DistributedCacheEmulator and
- * returns the jobConf. Fills the array sortedFileSizes that
- * can be used for validation.
- * Validation of exit code from setupGenerateDistCacheData() is done.
- * @param generate true if -generate option is specified
- * @param sortedFileSizes sorted HDFS-based distributed cache file sizes
- * @throws IOException
- * @throws InterruptedException
- */
- private JobConf runSetupGenerateDistCacheData(boolean generate,
- long[] sortedFileSizes) throws IOException, InterruptedException {
- Configuration conf = new Configuration();
- long[] fileSizes = configureDummyDistCacheFiles(conf, false);
- System.arraycopy(fileSizes, 0, sortedFileSizes, 0, fileSizes.length);
-
- // Job stories of all 3 jobs will have same dist cache files in their
- // configurations
- final int numJobs = 3;
- DebugJobProducer jobProducer = new DebugJobProducer(numJobs, conf);
-
- JobConf jobConf =
- GridmixTestUtils.mrCluster.createJobConf(new JobConf(conf));
- Path ioPath = new Path("testSetupGenerateDistCacheData")
- .makeQualified(GridmixTestUtils.dfs);
- FileSystem fs = FileSystem.get(jobConf);
- if (fs.exists(ioPath)) {
- fs.delete(ioPath, true);
- }
- FileSystem.mkdirs(fs, ioPath, new FsPermission((short)0777));
-
- dce = createDistributedCacheEmulator(jobConf, ioPath, generate);
- int exitCode = dce.setupGenerateDistCacheData(jobProducer);
- int expectedExitCode = generate ? 0 : dce.MISSING_DIST_CACHE_FILES_ERROR;
- assertEquals("setupGenerateDistCacheData failed.",
- expectedExitCode, exitCode);
-
- // reset back
- resetDistCacheConfigProperties(jobConf);
- return jobConf;
- }
-
- /**
- * Reset the config properties related to Distributed Cache in the given
- * job configuration jobConf.
- * @param jobConf job configuration
- */
- private void resetDistCacheConfigProperties(JobConf jobConf) {
- // reset current/latest property names
- jobConf.setStrings(MRJobConfig.CACHE_FILES, "");
- jobConf.setStrings(MRJobConfig.CACHE_FILES_SIZES, "");
- jobConf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, "");
- jobConf.setStrings(MRJobConfig.CACHE_FILE_VISIBILITIES, "");
- // reset old property names
- jobConf.setStrings("mapred.cache.files", "");
- jobConf.setStrings("mapred.cache.files.filesizes", "");
- jobConf.setStrings("mapred.cache.files.visibilities", "");
- jobConf.setStrings("mapred.cache.files.timestamps", "");
- }
-
- /**
- * Validate GenerateDistCacheData job if it creates dist cache files properly.
- * @throws Exception
- */
- @Test
- public void testGenerateDistCacheData() throws Exception {
- long[] sortedFileSizes = new long[5];
- JobConf jobConf =
- runSetupGenerateDistCacheData(true, sortedFileSizes);
- GridmixJob gridmixJob = new GenerateDistCacheData(jobConf);
- Job job = gridmixJob.call();
- assertEquals("Number of reduce tasks in GenerateDistCacheData is not 0.",
- 0, job.getNumReduceTasks());
- assertTrue("GenerateDistCacheData job failed.",
- job.waitForCompletion(false));
- validateDistCacheData(jobConf, sortedFileSizes);
- }
-
- /**
- * Validate setupGenerateDistCacheData by validating
- * permissions of the distributed cache directories and
- * content of the generated sequence file. This includes validation of
- * dist cache file paths and their file sizes.
- */
- private void validateSetupGenDC(JobConf jobConf, long[] sortedFileSizes)
- throws IOException, InterruptedException {
- // build things needed for validation
- long sumOfFileSizes = 0;
- for (int i = 0; i < sortedFileSizes.length; i++) {
- sumOfFileSizes += sortedFileSizes[i];
- }
-
- FileSystem fs = FileSystem.get(jobConf);
- assertEquals("Number of distributed cache files to be generated is wrong.",
- sortedFileSizes.length,
- jobConf.getInt(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_COUNT, -1));
- assertEquals("Total size of dist cache files to be generated is wrong.",
- sumOfFileSizes, jobConf.getLong(
- GenerateDistCacheData.GRIDMIX_DISTCACHE_BYTE_COUNT, -1));
- Path filesListFile = new Path(jobConf.get(
- GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_LIST));
- FileStatus stat = fs.getFileStatus(filesListFile);
- assertEquals("Wrong permissions of dist Cache files list file "
- + filesListFile, new FsPermission((short)0644), stat.getPermission());
-
- InputSplit split =
- new FileSplit(filesListFile, 0, stat.getLen(), (String[])null);
- TaskAttemptContext taskContext =
- MapReduceTestUtil.createDummyMapTaskAttemptContext(jobConf);
- RecordReader<LongWritable, BytesWritable> reader =
- new GenerateDistCacheData.GenDCDataFormat().createRecordReader(
- split, taskContext);
- MapContext<LongWritable, BytesWritable, NullWritable, BytesWritable>
- mapContext = new MapContextImpl<LongWritable, BytesWritable, NullWritable,
- BytesWritable>(jobConf, taskContext.getTaskAttemptID(),
- reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
- reader.initialize(split, mapContext);
-
- // start validating setupGenerateDistCacheData
- doValidateSetupGenDC(reader, fs, sortedFileSizes);
- }
-
- /**
- * Validate setupGenerateDistCacheData by validating
- * permissions of the distributed cache directory and
- * content of the generated sequence file. This includes validation of
- * dist cache file paths and their file sizes.
- */
- private void doValidateSetupGenDC(RecordReader<LongWritable, BytesWritable>
- reader, FileSystem fs, long[] sortedFileSizes)
- throws IOException, InterruptedException {
-
- // Validate permissions of dist cache directory
- Path distCacheDir = dce.getDistributedCacheDir();
- assertEquals("Wrong permissions for distributed cache dir " + distCacheDir,
- fs.getFileStatus(distCacheDir).getPermission()
- .getOtherAction().and(FsAction.EXECUTE), FsAction.EXECUTE);
-
- // Validate the content of the sequence file generated by
- // dce.setupGenerateDistCacheData().
- LongWritable key = new LongWritable();
- BytesWritable val = new BytesWritable();
- for (int i = 0; i < sortedFileSizes.length; i++) {
- assertTrue("Number of files written to the sequence file by "
- + "setupGenerateDistCacheData is less than the expected.",
- reader.nextKeyValue());
- key = reader.getCurrentKey();
- val = reader.getCurrentValue();
- long fileSize = key.get();
- String file = new String(val.getBytes(), 0, val.getLength());
-
- // Dist Cache files should be sorted based on file size.
- assertEquals("Dist cache file size is wrong.",
- sortedFileSizes[i], fileSize);
-
- // Validate dist cache file path.
-
- // parent dir of dist cache file
- Path parent = new Path(file).getParent().makeQualified(fs);
- // should exist in dist cache dir
- assertTrue("Public dist cache file path is wrong.",
- distCacheDir.equals(parent));
- }
- }
-
- /**
- * Test if DistributedCacheEmulator's setup of GenerateDistCacheData is
- * working as expected.
- * @throws IOException
- * @throws InterruptedException
- */
- @Test
- public void testSetupGenerateDistCacheData()
- throws IOException, InterruptedException {
- long[] sortedFileSizes = new long[5];
- JobConf jobConf = runSetupGenerateDistCacheData(true, sortedFileSizes);
- validateSetupGenDC(jobConf, sortedFileSizes);
-
- // Verify if correct exit code is seen when -generate option is missing and
- // distributed cache files are missing in the expected path.
- runSetupGenerateDistCacheData(false, sortedFileSizes);
- }
-
- /**
- * Create DistributedCacheEmulator object and do the initialization by
- * calling init() on it with dummy trace. Also configure the pseudo local FS.
- */
- private DistributedCacheEmulator createDistributedCacheEmulator(
- Configuration conf, Path ioPath, boolean generate) throws IOException {
- DistributedCacheEmulator dce =
- new DistributedCacheEmulator(conf, ioPath);
- JobCreator jobCreator = JobCreator.getPolicy(conf, JobCreator.LOADJOB);
- jobCreator.setDistCacheEmulator(dce);
- dce.init("dummytrace", jobCreator, generate);
- return dce;
- }
-
- /**
- * Test the configuration property for disabling/enabling emulation of
- * distributed cache load.
- */
- @Test
- public void testDistCacheEmulationConfigurability() throws IOException {
- Configuration conf = new Configuration();
- JobConf jobConf = GridmixTestUtils.mrCluster.createJobConf(
- new JobConf(conf));
- Path ioPath = new Path("testDistCacheEmulationConfigurability")
- .makeQualified(GridmixTestUtils.dfs);
- FileSystem fs = FileSystem.get(jobConf);
- FileSystem.mkdirs(fs, ioPath, new FsPermission((short)0777));
-
- // default config
- dce = createDistributedCacheEmulator(jobConf, ioPath, false);
- assertTrue("Default configuration of "
- + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
- + " is wrong.", dce.shouldEmulateDistCacheLoad());
-
- // config property set to false
- jobConf.setBoolean(
- DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE, false);
- dce = createDistributedCacheEmulator(jobConf, ioPath, false);
- assertFalse("Disabling of emulation of distributed cache load by setting "
- + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
- + " to false is not working.", dce.shouldEmulateDistCacheLoad());
- }
-
- /**
- * Verify if DistributedCacheEmulator can configure distributed cache files
- * for simulated job if job conf from trace had no dist cache files.
- * @param conf configuration for the simulated job to be run
- * @param jobConf job configuration of original cluster's job, obtained from
- * trace
- * @throws IOException
- */
- private void validateJobConfWithOutDCFiles(Configuration conf,
- JobConf jobConf) throws IOException {
- // Validate if Gridmix can configure dist cache files properly if there are
- // no HDFS-based dist cache files and localFS-based dist cache files in
- // trace for a job.
- dce.configureDistCacheFiles(conf, jobConf);
- assertNull("Distributed cache files configured by GridMix is wrong.",
- conf.get(MRJobConfig.CACHE_FILES));
- assertNull("Distributed cache files configured by Gridmix through -files "
- + "option is wrong.", conf.get("tmpfiles"));
- }
-
- /**
- * Verify if DistributedCacheEmulator can configure distributed cache files
- * for simulated job if job conf from trace had HDFS-based dist cache files
- * and local-FS-based dist cache files.
- *
- * Also validate if Gridmix can handle/read deprecated config properties
- * like mapred.cache.files.filesizes and mapred.cache.files.visibilities from
- * trace file.
- * @param conf configuration for the simulated job to be run
- * @param jobConf job configuration of original cluster's job, obtained from
- * trace
- * @throws IOException
- */
- private void validateJobConfWithDCFiles(Configuration conf,
- JobConf jobConf) throws IOException {
- long[] sortedFileSizes = configureDummyDistCacheFiles(jobConf, true);
-
- // Validate if Gridmix can handle deprecated config properties like
- // mapred.cache.files.filesizes and mapred.cache.files.visibilities.
- // 1 local FS based dist cache file and 5 HDFS based dist cache files. So
- // total expected dist cache files count is 6.
- assertEquals("Gridmix is not able to extract dist cache file sizes.",
- 6, jobConf.getStrings(MRJobConfig.CACHE_FILES_SIZES).length);
- assertEquals("Gridmix is not able to extract dist cache file visibilities.",
- 6, jobConf.getStrings(MRJobConfig.CACHE_FILE_VISIBILITIES).length);
-
- dce.configureDistCacheFiles(conf, jobConf);
-
- assertEquals("Configuring of HDFS-based dist cache files by gridmix is "
- + "wrong.", sortedFileSizes.length,
- conf.getStrings(MRJobConfig.CACHE_FILES).length);
- assertEquals("Configuring of local-FS-based dist cache files by gridmix is "
- + "wrong.", 1, conf.getStrings("tmpfiles").length);
- }
-
- /**
- * Verify if configureDistCacheFiles() works fine when there are distributed
- * cache files set but visibilities are not set. This is to handle history
- * traces of older hadoop version where there are no private/public
- * Distributed Caches.
- * @throws IOException
- */
- private void validateWithOutVisibilities() throws IOException {
- Configuration conf = new Configuration();// configuration for simulated job
- JobConf jobConf = new JobConf();
- String user = "user1";
- jobConf.setUser(user);
- String[] files = {"/tmp/hdfs1.txt", "/tmp/"+ user + "/.staging/file1"};
- jobConf.setStrings(MRJobConfig.CACHE_FILES, files);
- jobConf.setStrings(MRJobConfig.CACHE_FILES_SIZES, "12,200");
- jobConf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, "56789,98345");
- dce.configureDistCacheFiles(conf, jobConf);
- assertEquals("Configuring of HDFS-based dist cache files by gridmix is "
- + "wrong.", files.length,
- conf.getStrings(MRJobConfig.CACHE_FILES).length);
- assertNull("Configuring of local-FS-based dist cache files by gridmix is "
- + "wrong.", conf.get("tmpfiles"));
- }
-
- /**
- * Test if Gridmix can configure config properties related to Distributed
- * Cache properly. Also verify if Gridmix can handle deprecated config
- * properties related to Distributed Cache.
- * @throws IOException
- */
- @Test
- public void testDistCacheFilesConfiguration() throws IOException {
- Configuration conf = new Configuration();
- JobConf jobConf = GridmixTestUtils.mrCluster.createJobConf(
- new JobConf(conf));
- Path ioPath = new Path("testDistCacheEmulationConfigurability")
- .makeQualified(GridmixTestUtils.dfs);
- FileSystem fs = FileSystem.get(jobConf);
- FileSystem.mkdirs(fs, ioPath, new FsPermission((short)0777));
-
- // default config
- dce = createDistributedCacheEmulator(jobConf, ioPath, false);
- assertTrue("Default configuration of "
- + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
- + " is wrong.", dce.shouldEmulateDistCacheLoad());
-
- // Validate if DistributedCacheEmulator can handle a JobStory with out
- // Distributed Cache files properly.
- validateJobConfWithOutDCFiles(conf, jobConf);
-
- // Validate if Gridmix can configure dist cache files properly if there are
- // HDFS-based dist cache files and localFS-based dist cache files in trace
- // for a job. Set old config properties and validate.
- validateJobConfWithDCFiles(conf, jobConf);
-
- // Use new JobConf as JobStory conf and check if configureDistCacheFiles()
- // doesn't throw NPE when there are dist cache files set but visibilities
- // are not set.
- validateWithOutVisibilities();
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
deleted file mode 100644
index 22b742678f6..00000000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
+++ /dev/null
@@ -1,556 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import java.io.InputStream;
-import java.io.IOException;
-import java.text.DecimalFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.zip.GZIPInputStream;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapred.TaskReport;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.tools.rumen.JobStory;
-import org.apache.hadoop.tools.rumen.JobStoryProducer;
-import org.apache.hadoop.tools.rumen.TaskInfo;
-import org.apache.hadoop.util.ToolRunner;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import static org.junit.Assert.*;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.log4j.Level;
-
-import static org.apache.hadoop.mapreduce.TaskCounter.MAP_INPUT_RECORDS;
-import static org.apache.hadoop.mapreduce.TaskCounter.MAP_OUTPUT_BYTES;
-import static org.apache.hadoop.mapreduce.TaskCounter.MAP_OUTPUT_RECORDS;
-import static org.apache.hadoop.mapreduce.TaskCounter.REDUCE_INPUT_RECORDS;
-import static org.apache.hadoop.mapreduce.TaskCounter.REDUCE_OUTPUT_RECORDS;
-import static org.apache.hadoop.mapreduce.TaskCounter.REDUCE_SHUFFLE_BYTES;
-import static org.apache.hadoop.mapreduce.TaskCounter.SPLIT_RAW_BYTES;
-
-public class TestGridmixSubmission {
- static GridmixJobSubmissionPolicy policy = GridmixJobSubmissionPolicy.REPLAY;
- public static final Log LOG = LogFactory.getLog(Gridmix.class);
-
- {
- ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.mapred.gridmix")
- ).getLogger().setLevel(Level.DEBUG);
- }
-
- private static final int NJOBS = 3;
- private static final long GENDATA = 30; // in megabytes
- private static final int GENSLOP = 100 * 1024; // +/- 100k for logs
-
- @BeforeClass
- public static void init() throws IOException {
- GridmixTestUtils.initCluster();
- }
-
- @AfterClass
- public static void shutDown() throws IOException {
- GridmixTestUtils.shutdownCluster();
- }
-
- static class TestMonitor extends JobMonitor {
-
- static final long SLOPBYTES = 1024;
- private final int expected;
- private final BlockingQueue<Job> retiredJobs;
-
- public TestMonitor(int expected, Statistics stats) {
- super(stats);
- this.expected = expected;
- retiredJobs = new LinkedBlockingQueue<Job>();
- }
-
- public void verify(ArrayList<JobStory> submitted, Configuration clientConf)
- throws Exception {
- final ArrayList<Job> succeeded = new ArrayList<Job>();
- assertEquals("Bad job count", expected, retiredJobs.drainTo(succeeded));
- final HashMap<String, JobStory> sub = new HashMap<String, JobStory>();
-
- // define the input and output path for the run
- final Path in = new Path("foo").makeQualified(GridmixTestUtils.dfs);
- final Path out =
- new Path(in, clientConf.get(Gridmix.GRIDMIX_OUT_DIR, "gridmix"));
-
- for (JobStory spec : submitted) {
- sub.put(spec.getJobID().toString(), spec);
- }
- final JobClient client = new JobClient(
- GridmixTestUtils.mrCluster.createJobConf());
- for (Job job : succeeded) {
- final String jobName = job.getJobName();
- Configuration conf = job.getConfiguration();
- if (GenerateData.JOB_NAME.equals(jobName)) {
- verifyQueue(conf, jobName);
-
- final ContentSummary generated = GridmixTestUtils.dfs.getContentSummary(in);
- assertTrue("Mismatched data gen", // +/- 100k for logs
- (GENDATA << 20) < generated.getLength() + GENSLOP ||
- (GENDATA << 20) > generated.getLength() - GENSLOP);
- FileStatus[] outstat = GridmixTestUtils.dfs.listStatus(out);
- assertEquals("Mismatched job count", NJOBS, outstat.length);
- continue;
- } else if (GenerateDistCacheData.JOB_NAME.equals(jobName)) {
- verifyQueue(conf, jobName);
- continue;
- }
-
- if (!conf.getBoolean(
- GridmixJob.GRIDMIX_USE_QUEUE_IN_TRACE, true)) {
- assertEquals(" Improper queue for " + jobName + " " ,
- conf.get(MRJobConfig.QUEUE_NAME), "q1" );
- } else {
- assertEquals(" Improper queue for " + jobName + " ",
- conf.get(MRJobConfig.QUEUE_NAME),
- sub.get(conf.get(Gridmix.ORIGINAL_JOB_ID)).getQueueName());
- }
-
- final String originalJobId = conf.get(Gridmix.ORIGINAL_JOB_ID);
- final JobStory spec = sub.get(originalJobId);
- assertNotNull("No spec for " + jobName, spec);
- assertNotNull("No counters for " + jobName, job.getCounters());
- final String originalJobName = spec.getName();
- System.out.println("originalJobName=" + originalJobName
- + ";GridmixJobName=" + jobName + ";originalJobID=" + originalJobId);
- assertTrue("Original job name is wrong.", originalJobName.equals(
- conf.get(Gridmix.ORIGINAL_JOB_NAME)));
-
- // Gridmix job seqNum contains 6 digits
- int seqNumLength = 6;
- String jobSeqNum = new DecimalFormat("000000").format(
- conf.getInt(GridmixJob.GRIDMIX_JOB_SEQ, -1));
- // Original job name is of the format MOCKJOB<6 digit sequence number>
- // because MockJob jobNames are of this format.
- assertTrue(originalJobName.substring(
- originalJobName.length() - seqNumLength).equals(jobSeqNum));
-
- assertTrue("Gridmix job name is not in the expected format.",
- jobName.equals(
- GridmixJob.JOB_NAME_PREFIX + jobSeqNum));
-
- final FileStatus stat =
- GridmixTestUtils.dfs.getFileStatus(
- new Path(out, "" + Integer.valueOf(jobSeqNum)));
- assertEquals("Wrong owner for " + jobName, spec.getUser(),
- stat.getOwner());
-
- final int nMaps = spec.getNumberMaps();
- final int nReds = spec.getNumberReduces();
-
- // TODO Blocked by MAPREDUCE-118
- if (true) return;
- // TODO
- System.out.println(jobName + ": " + nMaps + "/" + nReds);
- final TaskReport[] mReports =
- client.getMapTaskReports(JobID.downgrade(job.getJobID()));
- assertEquals("Mismatched map count", nMaps, mReports.length);
- check(TaskType.MAP, job, spec, mReports,
- 0, 0, SLOPBYTES, nReds);
-
- final TaskReport[] rReports =
- client.getReduceTaskReports(JobID.downgrade(job.getJobID()));
- assertEquals("Mismatched reduce count", nReds, rReports.length);
- check(TaskType.REDUCE, job, spec, rReports,
- nMaps * SLOPBYTES, 2 * nMaps, 0, 0);
- }
- }
-
- // Verify if correct job queue is used
- private void verifyQueue(Configuration conf, String jobName) {
- if (!conf.getBoolean(
- GridmixJob.GRIDMIX_USE_QUEUE_IN_TRACE, true)) {
- assertEquals(" Improper queue for " + jobName,
- conf.get("mapred.job.queue.name"), "q1");
- } else {
- assertEquals(" Improper queue for " + jobName,
- conf.get("mapred.job.queue.name"), "default");
- }
- }
-
- public void check(final TaskType type, Job job, JobStory spec,
- final TaskReport[] runTasks,
- long extraInputBytes, int extraInputRecords,
- long extraOutputBytes, int extraOutputRecords) throws Exception {
-
- long[] runInputRecords = new long[runTasks.length];
- long[] runInputBytes = new long[runTasks.length];
- long[] runOutputRecords = new long[runTasks.length];
- long[] runOutputBytes = new long[runTasks.length];
- long[] specInputRecords = new long[runTasks.length];
- long[] specInputBytes = new long[runTasks.length];
- long[] specOutputRecords = new long[runTasks.length];
- long[] specOutputBytes = new long[runTasks.length];
-
- for (int i = 0; i < runTasks.length; ++i) {
- final TaskInfo specInfo;
- final Counters counters = runTasks[i].getCounters();
- switch (type) {
- case MAP:
- runInputBytes[i] = counters.findCounter("FileSystemCounters",
- "HDFS_BYTES_READ").getValue() -
- counters.findCounter(SPLIT_RAW_BYTES).getValue();
- runInputRecords[i] =
- (int)counters.findCounter(MAP_INPUT_RECORDS).getValue();
- runOutputBytes[i] =
- counters.findCounter(MAP_OUTPUT_BYTES).getValue();
- runOutputRecords[i] =
- (int)counters.findCounter(MAP_OUTPUT_RECORDS).getValue();
-
- specInfo = spec.getTaskInfo(TaskType.MAP, i);
- specInputRecords[i] = specInfo.getInputRecords();
- specInputBytes[i] = specInfo.getInputBytes();
- specOutputRecords[i] = specInfo.getOutputRecords();
- specOutputBytes[i] = specInfo.getOutputBytes();
- System.out.printf(type + " SPEC: %9d -> %9d :: %5d -> %5d\n",
- specInputBytes[i], specOutputBytes[i],
- specInputRecords[i], specOutputRecords[i]);
- System.out.printf(type + " RUN: %9d -> %9d :: %5d -> %5d\n",
- runInputBytes[i], runOutputBytes[i],
- runInputRecords[i], runOutputRecords[i]);
- break;
- case REDUCE:
- runInputBytes[i] = 0;
- runInputRecords[i] =
- (int)counters.findCounter(REDUCE_INPUT_RECORDS).getValue();
- runOutputBytes[i] =
- counters.findCounter("FileSystemCounters",
- "HDFS_BYTES_WRITTEN").getValue();
- runOutputRecords[i] =
- (int)counters.findCounter(REDUCE_OUTPUT_RECORDS).getValue();
-
-
- specInfo = spec.getTaskInfo(TaskType.REDUCE, i);
- // There is no reliable counter for reduce input bytes. The
- // variable-length encoding of intermediate records and other noise
- // make this quantity difficult to estimate. The shuffle and spec
- // input bytes are included in debug output for reference, but are
- // not checked
- specInputBytes[i] = 0;
- specInputRecords[i] = specInfo.getInputRecords();
- specOutputRecords[i] = specInfo.getOutputRecords();
- specOutputBytes[i] = specInfo.getOutputBytes();
- System.out.printf(type + " SPEC: (%9d) -> %9d :: %5d -> %5d\n",
- specInfo.getInputBytes(), specOutputBytes[i],
- specInputRecords[i], specOutputRecords[i]);
- System.out.printf(type + " RUN: (%9d) -> %9d :: %5d -> %5d\n",
- counters.findCounter(REDUCE_SHUFFLE_BYTES).getValue(),
- runOutputBytes[i], runInputRecords[i], runOutputRecords[i]);
- break;
- default:
- specInfo = null;
- fail("Unexpected type: " + type);
- }
- }
-
- // Check input bytes
- Arrays.sort(specInputBytes);
- Arrays.sort(runInputBytes);
- for (int i = 0; i < runTasks.length; ++i) {
- assertTrue("Mismatched " + type + " input bytes " +
- specInputBytes[i] + "/" + runInputBytes[i],
- eqPlusMinus(runInputBytes[i], specInputBytes[i], extraInputBytes));
- }
-
- // Check input records
- Arrays.sort(specInputRecords);
- Arrays.sort(runInputRecords);
- for (int i = 0; i < runTasks.length; ++i) {
- assertTrue("Mismatched " + type + " input records " +
- specInputRecords[i] + "/" + runInputRecords[i],
- eqPlusMinus(runInputRecords[i], specInputRecords[i],
- extraInputRecords));
- }
-
- // Check output bytes
- Arrays.sort(specOutputBytes);
- Arrays.sort(runOutputBytes);
- for (int i = 0; i < runTasks.length; ++i) {
- assertTrue("Mismatched " + type + " output bytes " +
- specOutputBytes[i] + "/" + runOutputBytes[i],
- eqPlusMinus(runOutputBytes[i], specOutputBytes[i],
- extraOutputBytes));
- }
-
- // Check output records
- Arrays.sort(specOutputRecords);
- Arrays.sort(runOutputRecords);
- for (int i = 0; i < runTasks.length; ++i) {
- assertTrue("Mismatched " + type + " output records " +
- specOutputRecords[i] + "/" + runOutputRecords[i],
- eqPlusMinus(runOutputRecords[i], specOutputRecords[i],
- extraOutputRecords));
- }
-
- }
-
- private static boolean eqPlusMinus(long a, long b, long x) {
- final long diff = Math.abs(a - b);
- return diff <= x;
- }
-
- @Override
- protected void onSuccess(Job job) {
- retiredJobs.add(job);
- }
- @Override
- protected void onFailure(Job job) {
- fail("Job failure: " + job);
- }
- }
-
- static class DebugGridmix extends Gridmix {
-
- private JobFactory factory;
- private TestMonitor monitor;
-
- public void checkMonitor(Configuration conf) throws Exception {
- monitor.verify(((DebugJobFactory.Debuggable)factory).getSubmitted(),
- conf);
- }
-
- @Override
- protected JobMonitor createJobMonitor(Statistics stats) {
- monitor = new TestMonitor(NJOBS + 1, stats);
- return monitor;
- }
-
- @Override
- protected JobFactory createJobFactory(JobSubmitter submitter,
- String traceIn, Path scratchDir, Configuration conf,
- CountDownLatch startFlag, UserResolver userResolver)
- throws IOException {
- factory = DebugJobFactory.getFactory(
- submitter, scratchDir, NJOBS, conf, startFlag, userResolver);
- return factory;
- }
- }
-
- /**
- * Verifies that the given {@code JobStory} corresponds to the checked-in
- * WordCount {@code JobStory}. The verification is effected via JUnit
- * assertions.
- *
- * @param js the candidate JobStory.
- */
- private void verifyWordCountJobStory(JobStory js) {
- assertNotNull("Null JobStory", js);
- String expectedJobStory = "WordCount:johndoe:default:1285322645148:3:1";
- String actualJobStory = js.getName() + ":" + js.getUser() + ":"
- + js.getQueueName() + ":" + js.getSubmissionTime() + ":"
- + js.getNumberMaps() + ":" + js.getNumberReduces();
- assertEquals("Unexpected JobStory", expectedJobStory, actualJobStory);
- }
-
- /**
- * Expands a file compressed using {@code gzip}.
- *
- * @param fs the {@code FileSystem} corresponding to the given
- * file.
- *
- * @param in the path to the compressed file.
- *
- * @param out the path to the uncompressed output.
- *
- * @throws Exception if there was an error during the operation.
- */
- private void expandGzippedTrace(FileSystem fs, Path in, Path out)
- throws Exception {
- byte[] buff = new byte[4096];
- GZIPInputStream gis = new GZIPInputStream(fs.open(in));
- FSDataOutputStream fsdos = fs.create(out);
- int numRead;
- while ((numRead = gis.read(buff, 0, buff.length)) != -1) {
- fsdos.write(buff, 0, numRead);
- }
- gis.close();
- fsdos.close();
- }
-
- /**
- * Tests the reading of traces in GridMix3. These traces are generated
- * by Rumen and are in the JSON format. The traces can optionally be
- * compressed and uncompressed traces can also be passed to GridMix3 via
- * its standard input stream. The testing is effected via JUnit assertions.
- *
- * @throws Exception if there was an error.
- */
- @Test
- public void testTraceReader() throws Exception {
- Configuration conf = new Configuration();
- FileSystem lfs = FileSystem.getLocal(conf);
- Path rootInputDir = new Path(System.getProperty("src.test.data"));
- rootInputDir
- = rootInputDir.makeQualified(lfs.getUri(), lfs.getWorkingDirectory());
- Path rootTempDir
- = new Path(System.getProperty("test.build.data",
- System.getProperty("java.io.tmpdir")), "testTraceReader");
- rootTempDir
- = rootTempDir.makeQualified(lfs.getUri(), lfs.getWorkingDirectory());
- Path inputFile = new Path(rootInputDir, "wordcount.json.gz");
- Path tempFile = new Path(rootTempDir, "gridmix3-wc.json");
-
- InputStream origStdIn = System.in;
- InputStream tmpIs = null;
- try {
- DebugGridmix dgm = new DebugGridmix();
- JobStoryProducer jsp
- = dgm.createJobStoryProducer(inputFile.toString(), conf);
-
- System.out.println("Verifying JobStory from compressed trace...");
- verifyWordCountJobStory(jsp.getNextJob());
-
- expandGzippedTrace(lfs, inputFile, tempFile);
- jsp = dgm.createJobStoryProducer(tempFile.toString(), conf);
- System.out.println("Verifying JobStory from uncompressed trace...");
- verifyWordCountJobStory(jsp.getNextJob());
-
- tmpIs = lfs.open(tempFile);
- System.setIn(tmpIs);
- System.out.println("Verifying JobStory from trace in standard input...");
- jsp = dgm.createJobStoryProducer("-", conf);
- verifyWordCountJobStory(jsp.getNextJob());
- } finally {
- System.setIn(origStdIn);
- if (tmpIs != null) {
- tmpIs.close();
- }
- lfs.delete(rootTempDir, true);
- }
- }
-
- @Test
- public void testReplaySubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.REPLAY;
- System.out.println(" Replay started at " + System.currentTimeMillis());
- doSubmission(false, false);
- System.out.println(" Replay ended at " + System.currentTimeMillis());
-
- System.out.println(" Replay started with default output path at time "
- + System.currentTimeMillis());
- doSubmission(false, true);
- System.out.println(" Replay ended with default output path at time "
- + System.currentTimeMillis());
- }
-
- @Test
- public void testStressSubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.STRESS;
- System.out.println(" Stress started at " + System.currentTimeMillis());
- doSubmission(false, false);
- System.out.println(" Stress ended at " + System.currentTimeMillis());
- }
-
- @Test
- public void testStressSubmitWithDefaultQueue() throws Exception {
- policy = GridmixJobSubmissionPolicy.STRESS;
- System.out.println(" Stress with default q started at "
- + System.currentTimeMillis());
- doSubmission(true, false);
- System.out.println(" Stress with default q ended at "
- + System.currentTimeMillis());
- }
-
- @Test
- public void testSerialSubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.SERIAL;
- System.out.println("Serial started at " + System.currentTimeMillis());
- doSubmission(false, false);
- System.out.println("Serial ended at " + System.currentTimeMillis());
- }
-
- private void doSubmission(boolean useDefaultQueue,
- boolean defaultOutputPath) throws Exception {
- final Path in = new Path("foo").makeQualified(GridmixTestUtils.dfs);
- final Path out = GridmixTestUtils.DEST.makeQualified(GridmixTestUtils.dfs);
- final Path root = new Path("/user");
- Configuration conf = null;
-
- try{
- ArrayList<String> argsList = new ArrayList<String>();
-
- argsList.add("-D" + FilePool.GRIDMIX_MIN_FILE + "=0");
- argsList.add("-D" + Gridmix.GRIDMIX_USR_RSV + "="
- + EchoUserResolver.class.getName());
-
- // Set the config property gridmix.output.directory only if
- // defaultOutputPath is false. If defaultOutputPath is true, then
- // let us allow gridmix to use the path foo/gridmix/ as output dir.
- if (!defaultOutputPath) {
- argsList.add("-D" + Gridmix.GRIDMIX_OUT_DIR + "=" + out);
- }
- argsList.add("-generate");
- argsList.add(String.valueOf(GENDATA) + "m");
- argsList.add(in.toString());
- argsList.add("-"); // ignored by DebugGridmix
-
- String[] argv = argsList.toArray(new String[argsList.size()]);
-
- DebugGridmix client = new DebugGridmix();
- conf = new Configuration();
- conf.setEnum(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY,policy);
- conf.set("mapreduce.job.hdfs-servers", "");
- if (useDefaultQueue) {
- conf.setBoolean(GridmixJob.GRIDMIX_USE_QUEUE_IN_TRACE, false);
- conf.set(GridmixJob.GRIDMIX_DEFAULT_QUEUE, "q1");
- } else {
- conf.setBoolean(GridmixJob.GRIDMIX_USE_QUEUE_IN_TRACE, true);
- }
- conf = GridmixTestUtils.mrCluster.createJobConf(new JobConf(conf));
- // allow synthetic users to create home directories
- GridmixTestUtils.dfs.mkdirs(root, new FsPermission((short)0777));
- GridmixTestUtils.dfs.setPermission(root, new FsPermission((short)0777));
- int res = ToolRunner.run(conf, client, argv);
- assertEquals("Client exited with nonzero status", 0, res);
- client.checkMonitor(conf);
- } catch (Exception e) {
- e.printStackTrace();
- // fail the test if there is an exception
- throw new RuntimeException(e);
- } finally {
- in.getFileSystem(conf).delete(in, true);
- out.getFileSystem(conf).delete(out, true);
- root.getFileSystem(conf).delete(root,true);
- }
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestSleepJob.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
deleted file mode 100644
index 84f292e770c..00000000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.tools.rumen.JobStory;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Level;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import static org.junit.Assert.*;
-
-public class TestSleepJob {
-
- public static final Log LOG = LogFactory.getLog(Gridmix.class);
-
- {
- ((Log4JLogger) LogFactory.getLog("org.apache.hadoop.mapred.gridmix"))
- .getLogger().setLevel(Level.DEBUG);
- }
-
- static GridmixJobSubmissionPolicy policy = GridmixJobSubmissionPolicy.REPLAY;
- private static final int NJOBS = 2;
- private static final long GENDATA = 50; // in megabytes
-
-
- @BeforeClass
- public static void init() throws IOException {
- GridmixTestUtils.initCluster();
- }
-
- @AfterClass
- public static void shutDown() throws IOException {
- GridmixTestUtils.shutdownCluster();
- }
-
- static class TestMonitor extends JobMonitor {
- private final BlockingQueue<Job> retiredJobs;
- private final int expected;
-
- public TestMonitor(int expected, Statistics stats) {
- super(stats);
- this.expected = expected;
- retiredJobs = new LinkedBlockingQueue<Job>();
- }
-
- @Override
- protected void onSuccess(Job job) {
- System.out.println(" Job Sucess " + job);
- retiredJobs.add(job);
- }
-
- @Override
- protected void onFailure(Job job) {
- fail("Job failure: " + job);
- }
-
- public void verify(ArrayList<JobStory> submitted) throws Exception {
- assertEquals("Bad job count", expected, retiredJobs.size());
- }
- }
-
-
- static class DebugGridmix extends Gridmix {
-
- private JobFactory factory;
- private TestMonitor monitor;
-
- @Override
- protected JobMonitor createJobMonitor(Statistics stats) {
- monitor = new TestMonitor(NJOBS + 1, stats);
- return monitor;
- }
-
- @Override
- protected JobFactory createJobFactory(
- JobSubmitter submitter, String traceIn, Path scratchDir,
- Configuration conf, CountDownLatch startFlag, UserResolver userResolver)
- throws IOException {
- factory =
- DebugJobFactory.getFactory(submitter, scratchDir, NJOBS, conf,
- startFlag, userResolver);
- return factory;
- }
-
- public void checkMonitor() throws Exception {
- monitor.verify(((DebugJobFactory.Debuggable) factory).getSubmitted());
- }
- }
-
-
- @Test
- public void testReplaySubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.REPLAY;
- System.out.println(" Replay started at " + System.currentTimeMillis());
- doSubmission();
- System.out.println(" Replay ended at " + System.currentTimeMillis());
- }
-
- @Test
- public void testRandomLocationSubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.STRESS;
- System.out.println(" Random locations started at " + System.currentTimeMillis());
- doSubmission("-D"+JobCreator.SLEEPJOB_RANDOM_LOCATIONS+"=3");
- System.out.println(" Random locations ended at " + System.currentTimeMillis());
- }
-
- @Test
- public void testMapTasksOnlySubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.STRESS;
- System.out.println(" Map tasks only at " + System.currentTimeMillis());
- doSubmission("-D"+SleepJob.SLEEPJOB_MAPTASK_ONLY+"=true");
- System.out.println(" Map tasks only ended at " + System.currentTimeMillis());
- }
-
- @Test
- public void testLimitTaskSleepTimeSubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.STRESS;
- System.out.println(" Limit sleep time only at " + System.currentTimeMillis());
- doSubmission("-D" + SleepJob.GRIDMIX_SLEEP_MAX_MAP_TIME + "=100", "-D"
- + SleepJob.GRIDMIX_SLEEP_MAX_REDUCE_TIME + "=200");
- System.out.println(" Limit sleep time ended at " + System.currentTimeMillis());
- }
-
- @Test
- public void testStressSubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.STRESS;
- System.out.println(" Stress started at " + System.currentTimeMillis());
- doSubmission();
- System.out.println(" Stress ended at " + System.currentTimeMillis());
- }
-
- @Test
- public void testSerialSubmit() throws Exception {
- policy = GridmixJobSubmissionPolicy.SERIAL;
- System.out.println("Serial started at " + System.currentTimeMillis());
- doSubmission();
- System.out.println("Serial ended at " + System.currentTimeMillis());
- }
-
- @Test
- public void testRandomLocation() throws Exception {
- UserGroupInformation ugi = UserGroupInformation.getLoginUser();
- // testRandomLocation(0, 10, ugi);
- testRandomLocation(1, 10, ugi);
- testRandomLocation(2, 10, ugi);
- }
-
- private void testRandomLocation(int locations, int njobs, UserGroupInformation ugi) throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(JobCreator.SLEEPJOB_RANDOM_LOCATIONS, locations);
- DebugJobProducer jobProducer = new DebugJobProducer(njobs, conf);
- JobConf jconf = GridmixTestUtils.mrCluster.createJobConf(new JobConf(conf));
- JobStory story;
- int seq=1;
- while ((story = jobProducer.getNextJob()) != null) {
- GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(jconf, 0,
- story, new Path("ignored"), ugi, seq++);
- gridmixJob.buildSplits(null);
- List<InputSplit> splits = new SleepJob.SleepInputFormat()
- .getSplits(gridmixJob.getJob());
- for (InputSplit split : splits) {
- assertEquals(locations, split.getLocations().length);
- }
- }
- }
-
- @Test
- public void testMapTasksOnlySleepJobs()
- throws Exception {
- Configuration conf = new Configuration();
- conf.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);
- conf.set("mapreduce.job.hdfs-servers", "");
- DebugJobProducer jobProducer = new DebugJobProducer(5, conf);
- JobConf jconf = GridmixTestUtils.mrCluster.createJobConf(new JobConf(conf));
- UserGroupInformation ugi = UserGroupInformation.getLoginUser();
- JobStory story;
- int seq = 1;
- while ((story = jobProducer.getNextJob()) != null) {
- GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(jconf, 0,
- story, new Path("ignored"), ugi, seq++);
- gridmixJob.buildSplits(null);
- Job job = gridmixJob.call();
- assertEquals(0, job.getNumReduceTasks());
- }
- }
-
- private void doSubmission(String...optional) throws Exception {
- final Path in = new Path("foo").makeQualified(GridmixTestUtils.dfs);
- final Path out = GridmixTestUtils.DEST.makeQualified(GridmixTestUtils.dfs);
- final Path root = new Path("/user");
- Configuration conf = null;
- try {
- // required options
- final String[] required = {
- "-D" + FilePool.GRIDMIX_MIN_FILE + "=0",
- "-D" + Gridmix.GRIDMIX_OUT_DIR + "=" + out,
- "-D" + Gridmix.GRIDMIX_USR_RSV + "=" + EchoUserResolver.class.getName(),
- "-D" + JobCreator.GRIDMIX_JOB_TYPE + "=" + JobCreator.SLEEPJOB.name(),
- "-D" + SleepJob.GRIDMIX_SLEEP_INTERVAL + "=" + "10"
- };
- // mandatory arguments
- final String[] mandatory = {
- "-generate",String.valueOf(GENDATA) + "m", in.toString(), "-"
- // ignored by DebugGridmix
- };
-
- ArrayList<String> argv = new ArrayList<String>(required.length + optional.length + mandatory.length);
- for (String s : required) {
- argv.add(s);
- }
- for (String s : optional) {
- argv.add(s);
- }
- for (String s : mandatory) {
- argv.add(s);
- }
-
- DebugGridmix client = new DebugGridmix();
- conf = new Configuration();
- conf.setEnum(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, policy);
- conf.set("mapreduce.job.hdfs-servers", "");
- conf = GridmixTestUtils.mrCluster.createJobConf(new JobConf(conf));
- // allow synthetic users to create home directories
- GridmixTestUtils.dfs.mkdirs(root, new FsPermission((short) 0777));
- GridmixTestUtils.dfs.setPermission(root, new FsPermission((short) 0777));
- String[] args = argv.toArray(new String[argv.size()]);
- System.out.println("Command line arguments:");
- for (int i=0; i
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Tests failed!
-
-
-
-
diff --git a/hadoop-mapreduce-project/src/contrib/index/conf/index-config.xml.template b/hadoop-mapreduce-project/src/contrib/index/conf/index-config.xml.template
deleted file mode 100755
index 96ba3cb8165..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/conf/index-config.xml.template
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0"?>
-
-<configuration>
-
-<property>
-  <name>sea.distribution.policy</name>
-  <value>org.apache.hadoop.contrib.index.example.HashingDistributionPolicy</value>
-</property>
-
-<property>
-  <name>sea.document.analyzer</name>
-  <value>org.apache.lucene.analysis.standard.StandardAnalyzer</value>
-</property>
-
-<property>
-  <name>sea.input.format</name>
-  <value>org.apache.hadoop.contrib.index.example.LineDocInputFormat</value>
-</property>
-
-<property>
-  <name>sea.index.updater</name>
-  <value>org.apache.hadoop.contrib.index.mapred.IndexUpdater</value>
-</property>
-
-<property>
-  <name>sea.local.analysis</name>
-  <value>org.apache.hadoop.contrib.index.example.LineDocLocalAnalysis</value>
-</property>
-
-<property>
-  <name>sea.max.field.length</name>
-  <value>2000000</value>
-</property>
-
-<property>
-  <name>sea.max.num.segments</name>
-  <value>10</value>
-</property>
-
-<property>
-  <name>sea.use.compound.file</name>
-  <value>true</value>
-</property>
-
-</configuration>
diff --git a/hadoop-mapreduce-project/src/contrib/index/ivy.xml b/hadoop-mapreduce-project/src/contrib/index/ivy.xml
deleted file mode 100644
index 8e981373b23..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/ivy.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-
-
-
-
-
-
-
-
- Apache Hadoop
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/hadoop-mapreduce-project/src/contrib/index/ivy/libraries.properties b/hadoop-mapreduce-project/src/contrib/index/ivy/libraries.properties
deleted file mode 100644
index 841cce3a42f..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/ivy/libraries.properties
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#This properties file lists the versions of the various artifacts used by index.
-
-#These are the versions of our dependencies (in alphabetical order)
-#Please list the dependency names with versions if they are different from the ones listed in the global libraries.properties file
diff --git a/hadoop-mapreduce-project/src/contrib/index/sample/data.txt b/hadoop-mapreduce-project/src/contrib/index/sample/data.txt
deleted file mode 100755
index 4bc9a8f9d77..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/sample/data.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-0 ins apache dot org
-1 ins apache
-2 ins apache
-3 ins apache
-4 ins apache
-5 ins apache
-6 ins apache
-7 ins apache
-8 ins apache
-9 ins apache
diff --git a/hadoop-mapreduce-project/src/contrib/index/sample/data2.txt b/hadoop-mapreduce-project/src/contrib/index/sample/data2.txt
deleted file mode 100755
index 550a9967b21..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/sample/data2.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-0 del
-1 upd hadoop
-2 del
-3 upd hadoop
-4 del
-5 upd hadoop
-6 del
-7 upd hadoop
-8 del
-9 upd hadoop
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/HashingDistributionPolicy.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/HashingDistributionPolicy.java
deleted file mode 100755
index f3e463a9aed..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/HashingDistributionPolicy.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.example;
-
-import org.apache.hadoop.contrib.index.mapred.DocumentID;
-import org.apache.hadoop.contrib.index.mapred.IDistributionPolicy;
-import org.apache.hadoop.contrib.index.mapred.Shard;
-
-/**
- * Choose a shard for each insert or delete based on document id hashing. Do
- * NOT use this distribution policy when the number of shards changes.
- */
-public class HashingDistributionPolicy implements IDistributionPolicy {
-
- private int numShards;
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.contrib.index.mapred.IDistributionPolicy#init(org.apache.hadoop.contrib.index.mapred.Shard[])
- */
- public void init(Shard[] shards) {
- numShards = shards.length;
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.contrib.index.mapred.IDistributionPolicy#chooseShardForInsert(org.apache.hadoop.contrib.index.mapred.DocumentID)
- */
- public int chooseShardForInsert(DocumentID key) {
- int hashCode = key.hashCode();
- return hashCode >= 0 ? hashCode % numShards : (-hashCode) % numShards;
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.contrib.index.mapred.IDistributionPolicy#chooseShardForDelete(org.apache.hadoop.contrib.index.mapred.DocumentID)
- */
- public int chooseShardForDelete(DocumentID key) {
- int hashCode = key.hashCode();
- return hashCode >= 0 ? hashCode % numShards : (-hashCode) % numShards;
- }
-
-}
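The policy above picks a shard by reducing the document ID's hash code modulo the shard count, so a given ID always maps to the same shard as long as the shard count never changes. Below is a minimal standalone sketch of that mapping; HashShardChooser and the String IDs are illustrative stand-ins, not part of the contrib code.

```java
public class HashShardChooser {

  // Same rule as HashingDistributionPolicy.chooseShardForInsert/Delete:
  // non-negative remainder of hashCode() modulo the number of shards.
  // (Like the original, this does not special-case Integer.MIN_VALUE.)
  static int chooseShard(String documentId, int numShards) {
    int hashCode = documentId.hashCode();
    return hashCode >= 0 ? hashCode % numShards : (-hashCode) % numShards;
  }

  public static void main(String[] args) {
    // The same ID lands on the same shard every time; changing the shard
    // count changes the mapping, which is why the javadoc forbids resizing.
    System.out.println(chooseShard("doc-42", 3));
    System.out.println(chooseShard("doc-42", 3));
    System.out.println(chooseShard("doc-42", 4));
  }
}
```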
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/IdentityLocalAnalysis.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/IdentityLocalAnalysis.java
deleted file mode 100755
index 79324b5b026..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/IdentityLocalAnalysis.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.example;
-
-import java.io.IOException;
-
-import org.apache.hadoop.contrib.index.mapred.DocumentAndOp;
-import org.apache.hadoop.contrib.index.mapred.DocumentID;
-import org.apache.hadoop.contrib.index.mapred.ILocalAnalysis;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.Reporter;
-
-/**
- * Identity local analysis maps inputs directly into outputs.
- */
-public class IdentityLocalAnalysis implements
- ILocalAnalysis<DocumentID, DocumentAndOp> {
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.Mapper#map(java.lang.Object, java.lang.Object, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)
- */
- public void map(DocumentID key, DocumentAndOp value,
- OutputCollector<DocumentID, DocumentAndOp> output, Reporter reporter)
- throws IOException {
- output.collect(key, value);
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.JobConfigurable#configure(org.apache.hadoop.mapred.JobConf)
- */
- public void configure(JobConf job) {
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.io.Closeable#close()
- */
- public void close() throws IOException {
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocInputFormat.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocInputFormat.java
deleted file mode 100755
index a09064bb2a9..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocInputFormat.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.example;
-
-import java.io.IOException;
-
-import org.apache.hadoop.contrib.index.mapred.DocumentID;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-
-/**
- * An InputFormat for LineDoc: plain text files where each line is a doc.
- */
-public class LineDocInputFormat extends
- FileInputFormat<DocumentID, LineDocTextAndOp> {
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.FileInputFormat#getRecordReader(org.apache.hadoop.mapred.InputSplit, org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.Reporter)
- */
- public RecordReader<DocumentID, LineDocTextAndOp> getRecordReader(
- InputSplit split, JobConf job, Reporter reporter) throws IOException {
- reporter.setStatus(split.toString());
- return new LineDocRecordReader(job, (FileSplit) split);
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocLocalAnalysis.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocLocalAnalysis.java
deleted file mode 100755
index c5a5a5a6d54..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocLocalAnalysis.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.example;
-
-import java.io.IOException;
-
-import org.apache.hadoop.contrib.index.mapred.DocumentAndOp;
-import org.apache.hadoop.contrib.index.mapred.DocumentID;
-import org.apache.hadoop.contrib.index.mapred.ILocalAnalysis;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.Term;
-
-/**
- * Convert LineDocTextAndOp to DocumentAndOp as required by ILocalAnalysis.
- */
-public class LineDocLocalAnalysis implements
- ILocalAnalysis<DocumentID, LineDocTextAndOp> {
-
- private static String docidFieldName = "id";
- private static String contentFieldName = "content";
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.Mapper#map(java.lang.Object, java.lang.Object, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)
- */
- public void map(DocumentID key, LineDocTextAndOp value,
- OutputCollector<DocumentID, DocumentAndOp> output, Reporter reporter)
- throws IOException {
-
- DocumentAndOp.Op op = value.getOp();
- Document doc = null;
- Term term = null;
-
- if (op == DocumentAndOp.Op.INSERT || op == DocumentAndOp.Op.UPDATE) {
- doc = new Document();
- doc.add(new Field(docidFieldName, key.getText().toString(),
- Field.Store.YES, Field.Index.UN_TOKENIZED));
- doc.add(new Field(contentFieldName, value.getText().toString(),
- Field.Store.NO, Field.Index.TOKENIZED));
- }
-
- if (op == DocumentAndOp.Op.DELETE || op == DocumentAndOp.Op.UPDATE) {
- term = new Term(docidFieldName, key.getText().toString());
- }
-
- output.collect(key, new DocumentAndOp(op, doc, term));
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.JobConfigurable#configure(org.apache.hadoop.mapred.JobConf)
- */
- public void configure(JobConf job) {
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.io.Closeable#close()
- */
- public void close() throws IOException {
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocRecordReader.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocRecordReader.java
deleted file mode 100755
index bc6e6ba48d8..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocRecordReader.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.example;
-
-import java.io.BufferedInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.contrib.index.mapred.DocumentAndOp;
-import org.apache.hadoop.contrib.index.mapred.DocumentID;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.RecordReader;
-
-/**
- * A simple RecordReader for LineDoc: plain text files where each line is a
- * doc. Each line is of the form documentID<SPACE>op<SPACE>content,
- * where op can be "i", "ins" or "insert" for insert, "d", "del" or "delete"
- * for delete, or "u", "upd" or "update" for update.
- */
-public class LineDocRecordReader implements
- RecordReader<DocumentID, LineDocTextAndOp> {
- private static final char SPACE = ' ';
- private static final char EOL = '\n';
-
- private long start;
- private long pos;
- private long end;
- private BufferedInputStream in;
- private ByteArrayOutputStream buffer = new ByteArrayOutputStream(256);
-
- /**
- * Provide a bridge to get the bytes from the ByteArrayOutputStream without
- * creating a new byte array.
- */
- private static class TextStuffer extends OutputStream {
- public Text target;
-
- public void write(int b) {
- throw new UnsupportedOperationException("write(byte) not supported");
- }
-
- public void write(byte[] data, int offset, int len) throws IOException {
- target.set(data, offset, len);
- }
- }
-
- private TextStuffer bridge = new TextStuffer();
-
- /**
- * Constructor
- * @param job
- * @param split
- * @throws IOException
- */
- public LineDocRecordReader(Configuration job, FileSplit split)
- throws IOException {
- long start = split.getStart();
- long end = start + split.getLength();
- final Path file = split.getPath();
-
- // open the file and seek to the start of the split
- FileSystem fs = file.getFileSystem(job);
- FSDataInputStream fileIn = fs.open(split.getPath());
- InputStream in = fileIn;
- boolean skipFirstLine = false;
- if (start != 0) {
- skipFirstLine = true; // wait till BufferedInputStream to skip
- --start;
- fileIn.seek(start);
- }
-
- this.in = new BufferedInputStream(in);
- if (skipFirstLine) { // skip first line and re-establish "start".
- start += LineDocRecordReader.readData(this.in, null, EOL);
- }
- this.start = start;
- this.pos = start;
- this.end = end;
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.RecordReader#close()
- */
- public void close() throws IOException {
- in.close();
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.RecordReader#createKey()
- */
- public DocumentID createKey() {
- return new DocumentID();
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.RecordReader#createValue()
- */
- public LineDocTextAndOp createValue() {
- return new LineDocTextAndOp();
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.RecordReader#getPos()
- */
- public long getPos() throws IOException {
- return pos;
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.RecordReader#getProgress()
- */
- public float getProgress() throws IOException {
- if (start == end) {
- return 0.0f;
- } else {
- return Math.min(1.0f, (pos - start) / (float) (end - start));
- }
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.mapred.RecordReader#next(java.lang.Object, java.lang.Object)
- */
- public synchronized boolean next(DocumentID key, LineDocTextAndOp value)
- throws IOException {
- if (pos >= end) {
- return false;
- }
-
- // key is document id, which are bytes until first space
- if (!readInto(key.getText(), SPACE)) {
- return false;
- }
-
- // read operation: i/d/u, or ins/del/upd, or insert/delete/update
- Text opText = new Text();
- if (!readInto(opText, SPACE)) {
- return false;
- }
- String opStr = opText.toString();
- DocumentAndOp.Op op;
- if (opStr.equals("i") || opStr.equals("ins") || opStr.equals("insert")) {
- op = DocumentAndOp.Op.INSERT;
- } else if (opStr.equals("d") || opStr.equals("del")
- || opStr.equals("delete")) {
- op = DocumentAndOp.Op.DELETE;
- } else if (opStr.equals("u") || opStr.equals("upd")
- || opStr.equals("update")) {
- op = DocumentAndOp.Op.UPDATE;
- } else {
- // default is insert
- op = DocumentAndOp.Op.INSERT;
- }
- value.setOp(op);
-
- if (op == DocumentAndOp.Op.DELETE) {
- return true;
- } else {
- // read rest of the line
- return readInto(value.getText(), EOL);
- }
- }
-
- private boolean readInto(Text text, char delimiter) throws IOException {
- buffer.reset();
- long bytesRead = readData(in, buffer, delimiter);
- if (bytesRead == 0) {
- return false;
- }
- pos += bytesRead;
- bridge.target = text;
- buffer.writeTo(bridge);
- return true;
- }
-
- private static long readData(InputStream in, OutputStream out, char delimiter)
- throws IOException {
- long bytes = 0;
- while (true) {
-
- int b = in.read();
- if (b == -1) {
- break;
- }
- bytes += 1;
-
- byte c = (byte) b;
- if (c == EOL || c == delimiter) {
- break;
- }
-
- if (c == '\r') {
- in.mark(1);
- byte nextC = (byte) in.read();
- if (nextC != EOL || c == delimiter) {
- in.reset();
- } else {
- bytes += 1;
- }
- break;
- }
-
- if (out != null) {
- out.write(c);
- }
- }
- return bytes;
- }
-}
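For reference, the sample files shown earlier ("0 ins apache dot org", "1 upd hadoop", "2 del") follow exactly this documentID/op/content layout. The sketch below parses such lines in plain Java, mimicking the reader's rules: id up to the first space, op up to the next space, the rest is content, and unknown ops default to insert. The class name and the simplified prefix-based op matching are illustrative, not the contrib implementation.

```java
import java.util.Locale;

public class LineDocParseSketch {
  enum Op { INSERT, DELETE, UPDATE }

  // Splits "documentID op content" the way LineDocRecordReader does.
  static void parse(String line) {
    int firstSpace = line.indexOf(' ');
    String id = line.substring(0, firstSpace);
    int secondSpace = line.indexOf(' ', firstSpace + 1);
    String opStr = (secondSpace < 0 ? line.substring(firstSpace + 1)
        : line.substring(firstSpace + 1, secondSpace)).toLowerCase(Locale.ROOT);
    Op op;
    if (opStr.startsWith("i")) {
      op = Op.INSERT;
    } else if (opStr.startsWith("d")) {
      op = Op.DELETE;
    } else if (opStr.startsWith("u")) {
      op = Op.UPDATE;
    } else {
      op = Op.INSERT; // unknown ops default to insert, as in the reader
    }
    // Deletes carry no content; the reader stops after the op token.
    String content = (op == Op.DELETE || secondSpace < 0) ? "" : line.substring(secondSpace + 1);
    System.out.println(id + " -> " + op + (content.isEmpty() ? "" : " [" + content + "]"));
  }

  public static void main(String[] args) {
    parse("0 ins apache dot org"); // from sample/data.txt
    parse("1 upd hadoop");         // from sample/data2.txt
    parse("2 del");
  }
}
```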
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocTextAndOp.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocTextAndOp.java
deleted file mode 100755
index be707517f40..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocTextAndOp.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.example;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.contrib.index.mapred.DocumentAndOp;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-
-/**
- * This class represents an operation. The operation can be an insert, a delete
- * or an update. If the operation is an insert or an update, a (new) document,
- * which is in the form of text, is specified.
- */
-public class LineDocTextAndOp implements Writable {
- private DocumentAndOp.Op op;
- private Text doc;
-
- /**
- * Constructor
- */
- public LineDocTextAndOp() {
- doc = new Text();
- }
-
- /**
- * Set the type of the operation.
- * @param op the type of the operation
- */
- public void setOp(DocumentAndOp.Op op) {
- this.op = op;
- }
-
- /**
- * Get the type of the operation.
- * @return the type of the operation
- */
- public DocumentAndOp.Op getOp() {
- return op;
- }
-
- /**
- * Get the text that represents a document.
- * @return the text that represents a document
- */
- public Text getText() {
- return doc;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- public String toString() {
- return this.getClass().getName() + "[op=" + op + ", text=" + doc + "]";
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
- */
- public void write(DataOutput out) throws IOException {
- throw new IOException(this.getClass().getName()
- + ".write should never be called");
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
- */
- public void readFields(DataInput in) throws IOException {
- throw new IOException(this.getClass().getName()
- + ".readFields should never be called");
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/RoundRobinDistributionPolicy.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/RoundRobinDistributionPolicy.java
deleted file mode 100755
index 8d69025dfdd..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/RoundRobinDistributionPolicy.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.example;
-
-import org.apache.hadoop.contrib.index.mapred.DocumentID;
-import org.apache.hadoop.contrib.index.mapred.IDistributionPolicy;
-import org.apache.hadoop.contrib.index.mapred.Shard;
-
-/**
- * Choose a shard for each insert in a round-robin fashion. Choose all the
- * shards for each delete because we don't know where it is stored.
- */
-public class RoundRobinDistributionPolicy implements IDistributionPolicy {
-
- private int numShards;
- private int rr; // round-robin implementation
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.contrib.index.mapred.IDistributionPolicy#init(org.apache.hadoop.contrib.index.mapred.Shard[])
- */
- public void init(Shard[] shards) {
- numShards = shards.length;
- rr = 0;
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.contrib.index.mapred.IDistributionPolicy#chooseShardForInsert(org.apache.hadoop.contrib.index.mapred.DocumentID)
- */
- public int chooseShardForInsert(DocumentID key) {
- int chosen = rr;
- rr = (rr + 1) % numShards;
- return chosen;
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.contrib.index.mapred.IDistributionPolicy#chooseShardForDelete(org.apache.hadoop.contrib.index.mapred.DocumentID)
- */
- public int chooseShardForDelete(DocumentID key) {
- // -1 represents all the shards
- return -1;
- }
-}
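A tiny sketch of the behaviour described above: inserts rotate through the shards in order, while a delete returns -1, which the update pipeline treats as "send to every shard". The class below is an illustrative stand-in, not the contrib policy itself.

```java
public class RoundRobinSketch {
  private final int numShards;
  private int next = 0;

  RoundRobinSketch(int numShards) { this.numShards = numShards; }

  // Inserts rotate through the shards; deletes return -1, meaning all shards.
  int chooseShardForInsert() {
    int chosen = next;
    next = (next + 1) % numShards;
    return chosen;
  }

  int chooseShardForDelete() { return -1; }

  public static void main(String[] args) {
    RoundRobinSketch policy = new RoundRobinSketch(3);
    for (int i = 0; i < 5; i++) {
      System.out.println("insert " + i + " -> shard " + policy.chooseShardForInsert()); // 0,1,2,0,1
    }
    System.out.println("delete -> " + policy.chooseShardForDelete()); // -1 = broadcast
  }
}
```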
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/FileSystemDirectory.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/FileSystemDirectory.java
deleted file mode 100755
index cd547eadd67..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/FileSystemDirectory.java
+++ /dev/null
@@ -1,349 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.lucene;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.store.BufferedIndexInput;
-import org.apache.lucene.store.BufferedIndexOutput;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.Lock;
-
-/**
- * This class implements a Lucene Directory on top of a general FileSystem.
- * Currently it does not support locking.
- */
-public class FileSystemDirectory extends Directory {
-
- private final FileSystem fs;
- private final Path directory;
- private final int ioFileBufferSize;
-
- /**
- * Constructor
- * @param fs
- * @param directory
- * @param create
- * @param conf
- * @throws IOException
- */
- public FileSystemDirectory(FileSystem fs, Path directory, boolean create,
- Configuration conf) throws IOException {
-
- this.fs = fs;
- this.directory = directory;
- this.ioFileBufferSize = conf.getInt("io.file.buffer.size", 4096);
-
- if (create) {
- create();
- }
-
- boolean isDir = false;
- try {
- FileStatus status = fs.getFileStatus(directory);
- if (status != null) {
- isDir = status.isDirectory();
- }
- } catch (IOException e) {
- // file does not exist, isDir already set to false
- }
- if (!isDir) {
- throw new IOException(directory + " is not a directory");
- }
- }
-
- private void create() throws IOException {
- if (!fs.exists(directory)) {
- fs.mkdirs(directory);
- }
-
- boolean isDir = false;
- try {
- FileStatus status = fs.getFileStatus(directory);
- if (status != null) {
- isDir = status.isDirectory();
- }
- } catch (IOException e) {
- // file does not exist, isDir already set to false
- }
- if (!isDir) {
- throw new IOException(directory + " is not a directory");
- }
-
- // clear old index files
- FileStatus[] fileStatus =
- fs.listStatus(directory, LuceneIndexFileNameFilter.getFilter());
- for (int i = 0; i < fileStatus.length; i++) {
- if (!fs.delete(fileStatus[i].getPath(), true)) {
- throw new IOException("Cannot delete index file "
- + fileStatus[i].getPath());
- }
- }
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#list()
- */
- public String[] list() throws IOException {
- FileStatus[] fileStatus =
- fs.listStatus(directory, LuceneIndexFileNameFilter.getFilter());
- String[] result = new String[fileStatus.length];
- for (int i = 0; i < fileStatus.length; i++) {
- result[i] = fileStatus[i].getPath().getName();
- }
- return result;
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#fileExists(java.lang.String)
- */
- public boolean fileExists(String name) throws IOException {
- return fs.exists(new Path(directory, name));
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#fileModified(java.lang.String)
- */
- public long fileModified(String name) {
- throw new UnsupportedOperationException();
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#touchFile(java.lang.String)
- */
- public void touchFile(String name) {
- throw new UnsupportedOperationException();
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#fileLength(java.lang.String)
- */
- public long fileLength(String name) throws IOException {
- return fs.getFileStatus(new Path(directory, name)).getLen();
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#deleteFile(java.lang.String)
- */
- public void deleteFile(String name) throws IOException {
- if (!fs.delete(new Path(directory, name), true)) {
- throw new IOException("Cannot delete index file " + name);
- }
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#renameFile(java.lang.String, java.lang.String)
- */
- public void renameFile(String from, String to) throws IOException {
- fs.rename(new Path(directory, from), new Path(directory, to));
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#createOutput(java.lang.String)
- */
- public IndexOutput createOutput(String name) throws IOException {
- Path file = new Path(directory, name);
- if (fs.exists(file) && !fs.delete(file, true)) {
- // delete the existing one if applicable
- throw new IOException("Cannot overwrite index file " + file);
- }
-
- return new FileSystemIndexOutput(file, ioFileBufferSize);
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#openInput(java.lang.String)
- */
- public IndexInput openInput(String name) throws IOException {
- return openInput(name, ioFileBufferSize);
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#openInput(java.lang.String, int)
- */
- public IndexInput openInput(String name, int bufferSize) throws IOException {
- return new FileSystemIndexInput(new Path(directory, name), bufferSize);
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#makeLock(java.lang.String)
- */
- public Lock makeLock(final String name) {
- return new Lock() {
- public boolean obtain() {
- return true;
- }
-
- public void release() {
- }
-
- public boolean isLocked() {
- throw new UnsupportedOperationException();
- }
-
- public String toString() {
- return "Lock@" + new Path(directory, name);
- }
- };
- }
-
- /* (non-Javadoc)
- * @see org.apache.lucene.store.Directory#close()
- */
- public void close() throws IOException {
- // do not close the file system
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- public String toString() {
- return this.getClass().getName() + "@" + directory;
- }
-
- private class FileSystemIndexInput extends BufferedIndexInput {
-
- // shared by clones
- private class Descriptor {
- public final FSDataInputStream in;
- public long position; // cache of in.getPos()
-
- public Descriptor(Path file, int ioFileBufferSize) throws IOException {
- this.in = fs.open(file, ioFileBufferSize);
- }
- }
-
- private final Path filePath; // for debugging
- private final Descriptor descriptor;
- private final long length;
- private boolean isOpen;
- private boolean isClone;
-
- public FileSystemIndexInput(Path path, int ioFileBufferSize)
- throws IOException {
- filePath = path;
- descriptor = new Descriptor(path, ioFileBufferSize);
- length = fs.getFileStatus(path).getLen();
- isOpen = true;
- }
-
- protected void readInternal(byte[] b, int offset, int len)
- throws IOException {
- synchronized (descriptor) {
- long position = getFilePointer();
- if (position != descriptor.position) {
- descriptor.in.seek(position);
- descriptor.position = position;
- }
- int total = 0;
- do {
- int i = descriptor.in.read(b, offset + total, len - total);
- if (i == -1) {
- throw new IOException("Read past EOF");
- }
- descriptor.position += i;
- total += i;
- } while (total < len);
- }
- }
-
- public void close() throws IOException {
- if (!isClone) {
- if (isOpen) {
- descriptor.in.close();
- isOpen = false;
- } else {
- throw new IOException("Index file " + filePath + " already closed");
- }
- }
- }
-
- protected void seekInternal(long position) {
- // handled in readInternal()
- }
-
- public long length() {
- return length;
- }
-
- protected void finalize() throws IOException {
- if (!isClone && isOpen) {
- close(); // close the file
- }
- }
-
- public Object clone() {
- FileSystemIndexInput clone = (FileSystemIndexInput) super.clone();
- clone.isClone = true;
- return clone;
- }
- }
-
- private class FileSystemIndexOutput extends BufferedIndexOutput {
-
- private final Path filePath; // for debugging
- private final FSDataOutputStream out;
- private boolean isOpen;
-
- public FileSystemIndexOutput(Path path, int ioFileBufferSize)
- throws IOException {
- filePath = path;
- // overwrite is true by default
- out = fs.create(path, true, ioFileBufferSize);
- isOpen = true;
- }
-
- public void flushBuffer(byte[] b, int offset, int size) throws IOException {
- out.write(b, offset, size);
- }
-
- public void close() throws IOException {
- if (isOpen) {
- super.close();
- out.close();
- isOpen = false;
- } else {
- throw new IOException("Index file " + filePath + " already closed");
- }
- }
-
- public void seek(long pos) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- public long length() throws IOException {
- return out.getPos();
- }
-
- protected void finalize() throws IOException {
- if (isOpen) {
- close(); // close the file
- }
- }
- }
-
-}
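Since FileSystemDirectory is public, it can be used directly as a Lucene Directory over HDFS (or any Hadoop FileSystem). A hedged usage sketch follows, assuming the deleted contrib classes and the pre-3.0 Lucene store API they target are on the classpath; the index path is an arbitrary placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.contrib.index.lucene.FileSystemDirectory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;

public class FileSystemDirectoryUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Create (or wipe) an index directory on the cluster file system.
    Directory dir = new FileSystemDirectory(fs, new Path("/tmp/index-shard-0"), true, conf);

    // List whatever Lucene index files are present; locking is a no-op here.
    for (String name : dir.list()) {
      IndexInput in = dir.openInput(name);
      System.out.println(name + " (" + in.length() + " bytes)");
      in.close();
    }

    dir.close(); // does not close the underlying FileSystem
  }
}
```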
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneIndexFileNameFilter.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneIndexFileNameFilter.java
deleted file mode 100755
index 286e95de9ee..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneIndexFileNameFilter.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.lucene;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.lucene.index.IndexFileNameFilter;
-
-/**
- * A wrapper class to convert an IndexFileNameFilter which implements
- * java.io.FilenameFilter to an org.apache.hadoop.fs.PathFilter.
- */
-class LuceneIndexFileNameFilter implements PathFilter {
-
- private static final LuceneIndexFileNameFilter singleton =
- new LuceneIndexFileNameFilter();
-
- /**
- * Get a static instance.
- * @return the static instance
- */
- public static LuceneIndexFileNameFilter getFilter() {
- return singleton;
- }
-
- private final IndexFileNameFilter luceneFilter;
-
- private LuceneIndexFileNameFilter() {
- luceneFilter = IndexFileNameFilter.getFilter();
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.fs.PathFilter#accept(org.apache.hadoop.fs.Path)
- */
- public boolean accept(Path path) {
- return luceneFilter.accept(null, path.getName());
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneUtil.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneUtil.java
deleted file mode 100755
index eff980824e8..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneUtil.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.lucene;
-
-import java.io.IOException;
-
-import org.apache.lucene.store.Directory;
-
-/**
- * This class copies some methods from Lucene's SegmentInfos since that class
- * is not public.
- */
-public final class LuceneUtil {
-
- static final class IndexFileNames {
- /** Name of the index segment file */
- static final String SEGMENTS = "segments";
-
- /** Name of the generation reference file */
- static final String SEGMENTS_GEN = "segments.gen";
- }
-
- /**
- * Check if the file is a segments_N file
- * @param name
- * @return true if the file is a segments_N file
- */
- public static boolean isSegmentsFile(String name) {
- return name.startsWith(IndexFileNames.SEGMENTS)
- && !name.equals(IndexFileNames.SEGMENTS_GEN);
- }
-
- /**
- * Check if the file is the segments.gen file
- * @param name
- * @return true if the file is the segments.gen file
- */
- public static boolean isSegmentsGenFile(String name) {
- return name.equals(IndexFileNames.SEGMENTS_GEN);
- }
-
- /**
- * Get the generation (N) of the current segments_N file in the directory.
- *
- * @param directory -- directory to search for the latest segments_N file
- */
- public static long getCurrentSegmentGeneration(Directory directory)
- throws IOException {
- String[] files = directory.list();
- if (files == null)
- throw new IOException("cannot read directory " + directory
- + ": list() returned null");
- return getCurrentSegmentGeneration(files);
- }
-
- /**
- * Get the generation (N) of the current segments_N file from a list of
- * files.
- *
- * @param files -- array of file names to check
- */
- public static long getCurrentSegmentGeneration(String[] files) {
- if (files == null) {
- return -1;
- }
- long max = -1;
- for (int i = 0; i < files.length; i++) {
- String file = files[i];
- if (file.startsWith(IndexFileNames.SEGMENTS)
- && !file.equals(IndexFileNames.SEGMENTS_GEN)) {
- long gen = generationFromSegmentsFileName(file);
- if (gen > max) {
- max = gen;
- }
- }
- }
- return max;
- }
-
- /**
- * Parse the generation off the segments file name and return it.
- */
- public static long generationFromSegmentsFileName(String fileName) {
- if (fileName.equals(IndexFileNames.SEGMENTS)) {
- return 0;
- } else if (fileName.startsWith(IndexFileNames.SEGMENTS)) {
- return Long.parseLong(
- fileName.substring(1 + IndexFileNames.SEGMENTS.length()),
- Character.MAX_RADIX);
- } else {
- throw new IllegalArgumentException("fileName \"" + fileName
- + "\" is not a segments file");
- }
- }
-
-}
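The generation parsing above is easy to misread: the suffix after "segments_" is encoded in radix 36 (Character.MAX_RADIX), so "segments_a" is generation 10, not a literal 'a'. A small standalone example of the same rule, without calling the contrib class:

```java
public class SegmentsGenerationExample {
  public static void main(String[] args) {
    // Same rule as LuceneUtil.generationFromSegmentsFileName: "segments" is
    // generation 0; otherwise parse the suffix after "segments_" in radix 36.
    String[] names = { "segments", "segments_2", "segments_a", "segments_z1" };
    for (String name : names) {
      long gen = name.equals("segments")
          ? 0
          : Long.parseLong(name.substring("segments".length() + 1), Character.MAX_RADIX);
      System.out.println(name + " -> generation " + gen); // segments_a -> 10, segments_z1 -> 1261
    }
  }
}
```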
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDeletionPolicy.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDeletionPolicy.java
deleted file mode 100755
index 01ef01e999f..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDeletionPolicy.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.lucene;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.lucene.index.IndexCommitPoint;
-import org.apache.lucene.index.IndexDeletionPolicy;
-
-/**
- * For mixed directory. Use KeepAllDeletionPolicy for the read-only directory
- * (keep all from init) and use KeepOnlyLastCommitDeletionPolicy for the
- * writable directory (initially empty, keep latest after init).
- */
-class MixedDeletionPolicy implements IndexDeletionPolicy {
-
- private int keepAllFromInit = 0;
-
- public void onInit(List commits) throws IOException {
- keepAllFromInit = commits.size();
- }
-
- public void onCommit(List commits) throws IOException {
- int size = commits.size();
- assert (size > keepAllFromInit);
- // keep all from init and the latest, delete the rest
- for (int i = keepAllFromInit; i < size - 1; i++) {
- ((IndexCommitPoint) commits.get(i)).delete();
- }
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDirectory.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDirectory.java
deleted file mode 100755
index 037e168b4bd..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDirectory.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.lucene;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.NoLockFactory;
-
-/**
- * The initial version of an index is stored in a read-only FileSystem dir
- * (FileSystemDirectory). Index files created by newer versions are written to
- * a writable local FS dir (Lucene's FSDirectory). We should use the general
- * FileSystemDirectory for the writable dir as well, but we have to use Lucene's
- * FSDirectory because Lucene currently does random writes and
- * FileSystemDirectory only supports sequential writes.
- *
- * Note: We may delete files from the read-only FileSystem dir because there
- * can be some segment files from an uncommitted checkpoint. For the same
- * reason, we may create files in the writable dir which already exist in the
- * read-only dir and logically they overwrite the ones in the read-only dir.
- */
-class MixedDirectory extends Directory {
-
- private final Directory readDir; // FileSystemDirectory
- private final Directory writeDir; // Lucene's FSDirectory
-
- // take advantage of the fact that Lucene's FSDirectory.fileExists is faster
-
- public MixedDirectory(FileSystem readFs, Path readPath, FileSystem writeFs,
- Path writePath, Configuration conf) throws IOException {
-
- try {
- readDir = new FileSystemDirectory(readFs, readPath, false, conf);
- // check writeFS is a local FS?
- writeDir = FSDirectory.getDirectory(writePath.toString());
-
- } catch (IOException e) {
- try {
- close();
- } catch (IOException e1) {
- // ignore this one, throw the original one
- }
- throw e;
- }
-
- lockFactory = new NoLockFactory();
- }
-
- // for debugging
- MixedDirectory(Directory readDir, Directory writeDir) throws IOException {
- this.readDir = readDir;
- this.writeDir = writeDir;
-
- lockFactory = new NoLockFactory();
- }
-
- @Override
- public String[] list() throws IOException {
- String[] readFiles = readDir.list();
- String[] writeFiles = writeDir.list();
-
- if (readFiles == null || readFiles.length == 0) {
- return writeFiles;
- } else if (writeFiles == null || writeFiles.length == 0) {
- return readFiles;
- } else {
- String[] result = new String[readFiles.length + writeFiles.length];
- System.arraycopy(readFiles, 0, result, 0, readFiles.length);
- System.arraycopy(writeFiles, 0, result, readFiles.length,
- writeFiles.length);
- return result;
- }
- }
-
- @Override
- public void deleteFile(String name) throws IOException {
- if (writeDir.fileExists(name)) {
- writeDir.deleteFile(name);
- }
- if (readDir.fileExists(name)) {
- readDir.deleteFile(name);
- }
- }
-
- @Override
- public boolean fileExists(String name) throws IOException {
- return writeDir.fileExists(name) || readDir.fileExists(name);
- }
-
- @Override
- public long fileLength(String name) throws IOException {
- if (writeDir.fileExists(name)) {
- return writeDir.fileLength(name);
- } else {
- return readDir.fileLength(name);
- }
- }
-
- @Override
- public long fileModified(String name) throws IOException {
- if (writeDir.fileExists(name)) {
- return writeDir.fileModified(name);
- } else {
- return readDir.fileModified(name);
- }
- }
-
- @Override
- public void renameFile(String from, String to) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void touchFile(String name) throws IOException {
- if (writeDir.fileExists(name)) {
- writeDir.touchFile(name);
- } else {
- readDir.touchFile(name);
- }
- }
-
- @Override
- public IndexOutput createOutput(String name) throws IOException {
- return writeDir.createOutput(name);
- }
-
- @Override
- public IndexInput openInput(String name) throws IOException {
- if (writeDir.fileExists(name)) {
- return writeDir.openInput(name);
- } else {
- return readDir.openInput(name);
- }
- }
-
- @Override
- public IndexInput openInput(String name, int bufferSize) throws IOException {
- if (writeDir.fileExists(name)) {
- return writeDir.openInput(name, bufferSize);
- } else {
- return readDir.openInput(name, bufferSize);
- }
- }
-
- @Override
- public void close() throws IOException {
- try {
- if (readDir != null) {
- readDir.close();
- }
- } finally {
- if (writeDir != null) {
- writeDir.close();
- }
- }
- }
-
- public String toString() {
- return this.getClass().getName() + "@" + readDir + "&" + writeDir;
- }
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/RAMDirectoryUtil.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/RAMDirectoryUtil.java
deleted file mode 100755
index 29aca3bc49b..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/RAMDirectoryUtil.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.lucene;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.RAMDirectory;
-
-/**
- * A utility class which writes an index in a ram dir into a DataOutput and
- * read from a DataInput an index into a ram dir.
- */
-public class RAMDirectoryUtil {
- private static final int BUFFER_SIZE = 1024; // RAMOutputStream.BUFFER_SIZE;
-
- /**
- * Write a number of files from a ram directory to a data output.
- * @param out the data output
- * @param dir the ram directory
- * @param names the names of the files to write
- * @throws IOException
- */
- public static void writeRAMFiles(DataOutput out, RAMDirectory dir,
- String[] names) throws IOException {
- out.writeInt(names.length);
-
- for (int i = 0; i < names.length; i++) {
- Text.writeString(out, names[i]);
- long length = dir.fileLength(names[i]);
- out.writeLong(length);
-
- if (length > 0) {
- // can we avoid the extra copy?
- IndexInput input = null;
- try {
- input = dir.openInput(names[i], BUFFER_SIZE);
-
- int position = 0;
- byte[] buffer = new byte[BUFFER_SIZE];
-
- while (position < length) {
- int len =
- position + BUFFER_SIZE <= length ? BUFFER_SIZE
- : (int) (length - position);
- input.readBytes(buffer, 0, len);
- out.write(buffer, 0, len);
- position += len;
- }
- } finally {
- if (input != null) {
- input.close();
- }
- }
- }
- }
- }
-
- /**
- * Read a number of files from a data input to a ram directory.
- * @param in the data input
- * @param dir the ram directory
- * @throws IOException
- */
- public static void readRAMFiles(DataInput in, RAMDirectory dir)
- throws IOException {
- int numFiles = in.readInt();
-
- for (int i = 0; i < numFiles; i++) {
- String name = Text.readString(in);
- long length = in.readLong();
-
- if (length > 0) {
- // can we avoid the extra copy?
- IndexOutput output = null;
- try {
- output = dir.createOutput(name);
-
- int position = 0;
- byte[] buffer = new byte[BUFFER_SIZE];
-
- while (position < length) {
- int len =
- position + BUFFER_SIZE <= length ? BUFFER_SIZE
- : (int) (length - position);
- in.readFully(buffer, 0, len);
- output.writeBytes(buffer, 0, len);
- position += len;
- }
- } finally {
- if (output != null) {
- output.close();
- }
- }
- }
- }
- }
-
-}
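RAMDirectoryUtil is symmetric: whatever writeRAMFiles emits, readRAMFiles can rebuild in a fresh RAMDirectory. Below is a round-trip sketch against the pre-4.0 Lucene RAMDirectory API that the contrib code uses; the file name and contents are arbitrary.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.contrib.index.lucene.RAMDirectoryUtil;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class RAMDirectoryRoundTrip {
  public static void main(String[] args) throws Exception {
    // Write one small file into a RAMDirectory.
    RAMDirectory src = new RAMDirectory();
    IndexOutput out = src.createOutput("hello.bin");
    out.writeBytes(new byte[] { 1, 2, 3, 4 }, 4);
    out.close();

    // Serialize it through a DataOutput, then rebuild it from the DataInput.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    RAMDirectoryUtil.writeRAMFiles(new DataOutputStream(bytes), src, new String[] { "hello.bin" });

    RAMDirectory copy = new RAMDirectory();
    RAMDirectoryUtil.readRAMFiles(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), copy);

    System.out.println("copied length = " + copy.fileLength("hello.bin")); // 4
  }
}
```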
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/ShardWriter.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/ShardWriter.java
deleted file mode 100755
index b4b09160bc1..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/ShardWriter.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.lucene;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.contrib.index.mapred.IndexUpdateConfiguration;
-import org.apache.hadoop.contrib.index.mapred.IntermediateForm;
-import org.apache.hadoop.contrib.index.mapred.Shard;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.Directory;
-
-/**
- * The initial version of an index is stored in the perm dir. Index files
- * created by newer versions are written to a temp dir on the local FS. After
- * successfully creating the new version in the temp dir, the shard writer
- * moves the new files to the perm dir and deletes the temp dir in close().
- */
-public class ShardWriter {
- static final Log LOG = LogFactory.getLog(ShardWriter.class);
-
- private final FileSystem fs;
- private final FileSystem localFs;
- private final Path perm;
- private final Path temp;
- private final Directory dir;
- private final IndexWriter writer;
- private int maxNumSegments;
- private long numForms = 0;
-
- /**
- * Constructor
- * @param fs
- * @param shard
- * @param tempDir
- * @param iconf
- * @throws IOException
- */
- public ShardWriter(FileSystem fs, Shard shard, String tempDir,
- IndexUpdateConfiguration iconf) throws IOException {
- LOG.info("Construct a shard writer");
-
- this.fs = fs;
- localFs = FileSystem.getLocal(iconf.getConfiguration());
- perm = new Path(shard.getDirectory());
- temp = new Path(tempDir);
-
- long initGeneration = shard.getGeneration();
- if (!fs.exists(perm)) {
- assert (initGeneration < 0);
- fs.mkdirs(perm);
- } else {
- restoreGeneration(fs, perm, initGeneration);
- }
- dir =
- new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
- iconf.getConfiguration());
-
- // analyzer is null because we only use addIndexes, not addDocument
- writer =
- new IndexWriter(dir, false, null,
- initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy()
- : new MixedDeletionPolicy());
- setParameters(iconf);
- }
-
- /**
- * Process an intermediate form by carrying out, on the Lucene instance of
- * the shard, the deletes and the inserts (a ram index) in the form.
- * @param form the intermediate form containing deletes and a ram index
- * @throws IOException
- */
- public void process(IntermediateForm form) throws IOException {
- // first delete
-    Iterator<Term> iter = form.deleteTermIterator();
- while (iter.hasNext()) {
- writer.deleteDocuments(iter.next());
- }
- // then insert
- writer.addIndexesNoOptimize(new Directory[] { form.getDirectory() });
- numForms++;
- }
-
- /**
- * Close the shard writer. Optimize the Lucene instance of the shard before
- * closing if necessary, and copy the files created in the temp directory
- * to the permanent directory after closing.
- * @throws IOException
- */
- public void close() throws IOException {
- LOG.info("Closing the shard writer, processed " + numForms + " forms");
- try {
- try {
- if (maxNumSegments > 0) {
- writer.optimize(maxNumSegments);
- LOG.info("Optimized the shard into at most " + maxNumSegments
- + " segments");
- }
- } finally {
- writer.close();
- LOG.info("Closed Lucene index writer");
- }
-
- moveFromTempToPerm();
- LOG.info("Moved new index files to " + perm);
-
- } finally {
- dir.close();
- LOG.info("Closed the shard writer");
- }
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- public String toString() {
- return this.getClass().getName() + "@" + perm + "&" + temp;
- }
-
- private void setParameters(IndexUpdateConfiguration iconf) {
- int maxFieldLength = iconf.getIndexMaxFieldLength();
- if (maxFieldLength > 0) {
- writer.setMaxFieldLength(maxFieldLength);
- }
- writer.setUseCompoundFile(iconf.getIndexUseCompoundFile());
- maxNumSegments = iconf.getIndexMaxNumSegments();
-
- if (maxFieldLength > 0) {
- LOG.info("sea.max.field.length = " + writer.getMaxFieldLength());
- }
- LOG.info("sea.use.compound.file = " + writer.getUseCompoundFile());
- LOG.info("sea.max.num.segments = " + maxNumSegments);
- }
-
- // in case a previous reduce task fails, restore the generation to
- // the original starting point by deleting the segments.gen file
- // and the segments_N files whose generations are greater than the
- // starting generation; rest of the unwanted files will be deleted
- // once the unwanted segments_N files are deleted
- private void restoreGeneration(FileSystem fs, Path perm, long startGen)
- throws IOException {
-
- FileStatus[] fileStatus = fs.listStatus(perm, new PathFilter() {
- public boolean accept(Path path) {
- return LuceneUtil.isSegmentsFile(path.getName());
- }
- });
-
- // remove the segments_N files whose generation are greater than
- // the starting generation
- for (int i = 0; i < fileStatus.length; i++) {
- Path path = fileStatus[i].getPath();
- if (startGen < LuceneUtil.generationFromSegmentsFileName(path.getName())) {
- fs.delete(path, true);
- }
- }
-
- // always remove segments.gen in case last failed try removed segments_N
- // but not segments.gen, and segments.gen will be overwritten anyway.
- Path segmentsGenFile = new Path(LuceneUtil.IndexFileNames.SEGMENTS_GEN);
- if (fs.exists(segmentsGenFile)) {
- fs.delete(segmentsGenFile, true);
- }
- }
-
- // move the files created in the temp dir into the perm dir
- // and then delete the temp dir from the local FS
- private void moveFromTempToPerm() throws IOException {
- try {
- FileStatus[] fileStatus =
- localFs.listStatus(temp, LuceneIndexFileNameFilter.getFilter());
- Path segmentsPath = null;
- Path segmentsGenPath = null;
-
- // move the files created in temp dir except segments_N and segments.gen
- for (int i = 0; i < fileStatus.length; i++) {
- Path path = fileStatus[i].getPath();
- String name = path.getName();
-
- if (LuceneUtil.isSegmentsGenFile(name)) {
- assert (segmentsGenPath == null);
- segmentsGenPath = path;
- } else if (LuceneUtil.isSegmentsFile(name)) {
- assert (segmentsPath == null);
- segmentsPath = path;
- } else {
- fs.completeLocalOutput(new Path(perm, name), path);
- }
- }
-
- // move the segments_N file
- if (segmentsPath != null) {
- fs.completeLocalOutput(new Path(perm, segmentsPath.getName()),
- segmentsPath);
- }
-
- // move the segments.gen file
- if (segmentsGenPath != null) {
- fs.completeLocalOutput(new Path(perm, segmentsGenPath.getName()),
- segmentsGenPath);
- }
- } finally {
- // finally delete the temp dir (files should have been deleted)
- localFs.delete(temp, true);
- }
- }
-
-}
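
A sketch of how a reduce task might drive ShardWriter directly, with hypothetical shard and temp paths and the same -1 version/generation convention used by UpdateIndex; this is illustrative, not code from this tree:

import org.apache.hadoop.contrib.index.lucene.ShardWriter;
import org.apache.hadoop.contrib.index.mapred.IndexUpdateConfiguration;
import org.apache.hadoop.contrib.index.mapred.IntermediateForm;
import org.apache.hadoop.contrib.index.mapred.Shard;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;

public class ShardWriterSketch {
  public static void applyForm(IntermediateForm form) throws Exception {
    JobConf jobConf = new JobConf();
    IndexUpdateConfiguration iconf = new IndexUpdateConfiguration(jobConf);

    FileSystem fs = FileSystem.get(jobConf);
    // -1 version/generation marks a shard that has no committed index yet.
    Shard shard = new Shard(-1L, "/index/shard_00000", -1L);

    ShardWriter writer =
        new ShardWriter(fs, shard, "/tmp/shard_00000", iconf);
    try {
      // Apply the buffered deletes and the ram index held by the form.
      writer.process(form);
    } finally {
      // Optimize if configured, close the Lucene writer, then move the new
      // files from the temp dir to the permanent shard directory.
      writer.close();
    }
  }
}
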
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/main/UpdateIndex.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/main/UpdateIndex.java
deleted file mode 100755
index 778f18bcc9a..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/main/UpdateIndex.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.main;
-
-import java.io.IOException;
-import java.text.NumberFormat;
-import java.util.Arrays;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.contrib.index.mapred.IndexUpdateConfiguration;
-import org.apache.hadoop.contrib.index.mapred.IIndexUpdater;
-import org.apache.hadoop.contrib.index.mapred.Shard;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.FileOutputFormat;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.util.ReflectionUtils;
-
-/**
- * A distributed "index" is partitioned into "shards". Each shard corresponds
- * to a Lucene instance. This class contains the main() method which uses a
- * Map/Reduce job to analyze documents and update Lucene instances in parallel.
- *
- * The main() method in UpdateIndex requires the following information for
- * updating the shards:
- * - Input formatter. This specifies how to format the input documents.
- * - Analysis. This defines the analyzer to use on the input. The analyzer
- * determines whether a document is being inserted, updated, or deleted.
- * For inserts or updates, the analyzer also converts each input document
- * into a Lucene document.
- * - Input paths. This provides the location(s) of updated documents,
- * e.g., HDFS files or directories, or HBase tables.
- * - Shard paths, or index path with the number of shards. Either specify
- * the path for each shard, or specify an index path and the shards are
- * the sub-directories of the index directory.
- * - Output path. When the update to a shard is done, a message is put here.
- * - Number of map tasks.
- *
- * All of the information can be specified in a configuration file. All but
- * the first two can also be specified as command line options. Check out
- * conf/index-config.xml.template for other configurable parameters.
- *
- * Note: Because of the parallel nature of Map/Reduce, the behaviour of
- * multiple inserts, deletes or updates to the same document is undefined.
- */
-public class UpdateIndex {
- public static final Log LOG = LogFactory.getLog(UpdateIndex.class);
-
- private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
- static {
- NUMBER_FORMAT.setMinimumIntegerDigits(5);
- NUMBER_FORMAT.setGroupingUsed(false);
- }
-
- private static long now() {
- return System.currentTimeMillis();
- }
-
- private static void printUsage(String cmd) {
- System.err.println("Usage: java " + UpdateIndex.class.getName() + "\n"
-        + " -inputPaths <inputPath,inputPath>\n"
-        + " -outputPath <outputPath>\n"
-        + " -shards <shardDir,shardDir>\n"
-        + " -indexPath <indexPath>\n"
-        + " -numShards <num>\n"
-        + " -numMapTasks <num>\n"
-        + " -conf <confPath>\n"
- + "Note: Do not use both -shards option and -indexPath option.");
- }
-
- private static String getIndexPath(Configuration conf) {
- return conf.get("sea.index.path");
- }
-
- private static int getNumShards(Configuration conf) {
- return conf.getInt("sea.num.shards", 1);
- }
-
- private static Shard[] createShards(String indexPath, int numShards,
- Configuration conf) throws IOException {
-
- String parent = Shard.normalizePath(indexPath) + Path.SEPARATOR;
- long versionNumber = -1;
- long generation = -1;
-
- FileSystem fs = FileSystem.get(conf);
- Path path = new Path(indexPath);
-
- if (fs.exists(path)) {
- FileStatus[] fileStatus = fs.listStatus(path);
- String[] shardNames = new String[fileStatus.length];
- int count = 0;
- for (int i = 0; i < fileStatus.length; i++) {
- if (fileStatus[i].isDirectory()) {
- shardNames[count] = fileStatus[i].getPath().getName();
- count++;
- }
- }
- Arrays.sort(shardNames, 0, count);
-
- Shard[] shards = new Shard[count >= numShards ? count : numShards];
- for (int i = 0; i < count; i++) {
- shards[i] =
- new Shard(versionNumber, parent + shardNames[i], generation);
- }
-
- int number = count;
- for (int i = count; i < numShards; i++) {
- String shardPath;
- while (true) {
- shardPath = parent + NUMBER_FORMAT.format(number++);
- if (!fs.exists(new Path(shardPath))) {
- break;
- }
- }
- shards[i] = new Shard(versionNumber, shardPath, generation);
- }
- return shards;
- } else {
- Shard[] shards = new Shard[numShards];
- for (int i = 0; i < shards.length; i++) {
- shards[i] =
- new Shard(versionNumber, parent + NUMBER_FORMAT.format(i),
- generation);
- }
- return shards;
- }
- }
-
- /**
- * The main() method
- * @param argv
- */
- public static void main(String[] argv) {
- if (argv.length == 0) {
- printUsage("");
- System.exit(-1);
- }
-
- String inputPathsString = null;
- Path outputPath = null;
- String shardsString = null;
- String indexPath = null;
- int numShards = -1;
- int numMapTasks = -1;
- Configuration conf = new Configuration();
- String confPath = null;
-
- // parse the command line
- for (int i = 0; i < argv.length; i++) { // parse command line
- if (argv[i].equals("-inputPaths")) {
- inputPathsString = argv[++i];
- } else if (argv[i].equals("-outputPath")) {
- outputPath = new Path(argv[++i]);
- } else if (argv[i].equals("-shards")) {
- shardsString = argv[++i];
- } else if (argv[i].equals("-indexPath")) {
- indexPath = argv[++i];
- } else if (argv[i].equals("-numShards")) {
- numShards = Integer.parseInt(argv[++i]);
- } else if (argv[i].equals("-numMapTasks")) {
- numMapTasks = Integer.parseInt(argv[++i]);
- } else if (argv[i].equals("-conf")) {
- // add as a local FS resource
- confPath = argv[++i];
- conf.addResource(new Path(confPath));
- } else {
- System.out.println("Unknown option " + argv[i] + " w/ value "
- + argv[++i]);
- }
- }
- LOG.info("inputPaths = " + inputPathsString);
- LOG.info("outputPath = " + outputPath);
- LOG.info("shards = " + shardsString);
- LOG.info("indexPath = " + indexPath);
- LOG.info("numShards = " + numShards);
- LOG.info("numMapTasks= " + numMapTasks);
- LOG.info("confPath = " + confPath);
-
- Path[] inputPaths = null;
- Shard[] shards = null;
-
- JobConf jobConf = new JobConf(conf);
- IndexUpdateConfiguration iconf = new IndexUpdateConfiguration(jobConf);
-
- if (inputPathsString != null) {
- jobConf.set(org.apache.hadoop.mapreduce.lib.input.
- FileInputFormat.INPUT_DIR, inputPathsString);
- }
- inputPaths = FileInputFormat.getInputPaths(jobConf);
- if (inputPaths.length == 0) {
- inputPaths = null;
- }
-
- if (outputPath == null) {
- outputPath = FileOutputFormat.getOutputPath(jobConf);
- }
-
- if (inputPaths == null || outputPath == null) {
- System.err.println("InputPaths and outputPath must be specified.");
- printUsage("");
- System.exit(-1);
- }
-
- if (shardsString != null) {
- iconf.setIndexShards(shardsString);
- }
- shards = Shard.getIndexShards(iconf);
- if (shards != null && shards.length == 0) {
- shards = null;
- }
-
- if (indexPath == null) {
- indexPath = getIndexPath(conf);
- }
- if (numShards <= 0) {
- numShards = getNumShards(conf);
- }
-
- if (shards == null && indexPath == null) {
- System.err.println("Either shards or indexPath must be specified.");
- printUsage("");
- System.exit(-1);
- }
-
- if (numMapTasks <= 0) {
- numMapTasks = jobConf.getNumMapTasks();
- }
-
- try {
- // create shards and set their directories if necessary
- if (shards == null) {
- shards = createShards(indexPath, numShards, conf);
- }
-
- long startTime = now();
- try {
- IIndexUpdater updater =
- (IIndexUpdater) ReflectionUtils.newInstance(
- iconf.getIndexUpdaterClass(), conf);
- LOG.info("sea.index.updater = "
- + iconf.getIndexUpdaterClass().getName());
-
- updater.run(conf, inputPaths, outputPath, numMapTasks, shards);
- LOG.info("Index update job is done");
-
- } finally {
- long elapsedTime = now() - startTime;
- LOG.info("Elapsed time is " + (elapsedTime / 1000) + "s");
- System.out.println("Elapsed time is " + (elapsedTime / 1000) + "s");
- }
- } catch (Exception e) {
- e.printStackTrace(System.err);
- }
- }
-}
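
An illustrative programmatic invocation of the driver above, using hypothetical HDFS paths; note that -indexPath/-numShards and -shards are mutually exclusive:

import org.apache.hadoop.contrib.index.main.UpdateIndex;

public class UpdateIndexExample {
  public static void main(String[] args) throws Exception {
    // With -indexPath and -numShards, shards default to numbered
    // sub-directories such as /index/shards/00000 unless they already exist.
    UpdateIndex.main(new String[] {
        "-inputPaths",  "/docs/batch1,/docs/batch2",
        "-outputPath",  "/index/done",
        "-indexPath",   "/index/shards",
        "-numShards",   "4",
        "-numMapTasks", "8",
        "-conf",        "/conf/index-config.xml"
    });
  }
}
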
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentAndOp.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentAndOp.java
deleted file mode 100755
index f07008446f7..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentAndOp.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.Term;
-
-/**
- * This class represents an indexing operation. The operation can be an insert,
- * a delete or an update. If the operation is an insert or an update, a (new)
- * document must be specified. If the operation is a delete or an update, a
- * delete term must be specified.
- */
-public class DocumentAndOp implements Writable {
-
- /**
- * This class represents the type of an operation - an insert, a delete or
- * an update.
- */
- public static final class Op {
- public static final Op INSERT = new Op("INSERT");
- public static final Op DELETE = new Op("DELETE");
- public static final Op UPDATE = new Op("UPDATE");
-
- private String name;
-
- private Op(String name) {
- this.name = name;
- }
-
- public String toString() {
- return name;
- }
- }
-
- private Op op;
- private Document doc;
- private Term term;
-
- /**
- * Constructor for no operation.
- */
- public DocumentAndOp() {
- }
-
- /**
- * Constructor for an insert operation.
- * @param op
- * @param doc
- */
- public DocumentAndOp(Op op, Document doc) {
- assert (op == Op.INSERT);
- this.op = op;
- this.doc = doc;
- this.term = null;
- }
-
- /**
- * Constructor for a delete operation.
- * @param op
- * @param term
- */
- public DocumentAndOp(Op op, Term term) {
- assert (op == Op.DELETE);
- this.op = op;
- this.doc = null;
- this.term = term;
- }
-
- /**
- * Constructor for an insert, a delete or an update operation.
- * @param op
- * @param doc
- * @param term
- */
- public DocumentAndOp(Op op, Document doc, Term term) {
- if (op == Op.INSERT) {
- assert (doc != null);
- assert (term == null);
- } else if (op == Op.DELETE) {
- assert (doc == null);
- assert (term != null);
- } else {
- assert (op == Op.UPDATE);
- assert (doc != null);
- assert (term != null);
- }
- this.op = op;
- this.doc = doc;
- this.term = term;
- }
-
- /**
- * Set the instance to be an insert operation.
- * @param doc
- */
- public void setInsert(Document doc) {
- this.op = Op.INSERT;
- this.doc = doc;
- this.term = null;
- }
-
- /**
- * Set the instance to be a delete operation.
- * @param term
- */
- public void setDelete(Term term) {
- this.op = Op.DELETE;
- this.doc = null;
- this.term = term;
- }
-
- /**
- * Set the instance to be an update operation.
- * @param doc
- * @param term
- */
- public void setUpdate(Document doc, Term term) {
- this.op = Op.UPDATE;
- this.doc = doc;
- this.term = term;
- }
-
- /**
- * Get the type of operation.
- * @return the type of the operation.
- */
- public Op getOp() {
- return op;
- }
-
- /**
- * Get the document.
- * @return the document
- */
- public Document getDocument() {
- return doc;
- }
-
- /**
- * Get the term.
- * @return the term
- */
- public Term getTerm() {
- return term;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- public String toString() {
- StringBuilder buffer = new StringBuilder();
- buffer.append(this.getClass().getName());
- buffer.append("[op=");
- buffer.append(op);
- buffer.append(", doc=");
- if (doc != null) {
- buffer.append(doc);
- } else {
- buffer.append("null");
- }
- buffer.append(", term=");
- if (term != null) {
- buffer.append(term);
- } else {
- buffer.append("null");
- }
- buffer.append("]");
- return buffer.toString();
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
- */
- public void write(DataOutput out) throws IOException {
- throw new IOException(this.getClass().getName()
- + ".write should never be called");
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
- */
- public void readFields(DataInput in) throws IOException {
- throw new IOException(this.getClass().getName()
- + ".readFields should never be called");
- }
-}
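
A short sketch of constructing the three kinds of operations DocumentAndOp models, assuming a Lucene 2.4-era Field API; the field names and values are illustrative:

import org.apache.hadoop.contrib.index.mapred.DocumentAndOp;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.Term;

public class DocumentAndOpExample {
  public static void main(String[] args) {
    Document doc = new Document();
    doc.add(new Field("id", "doc-42", Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.add(new Field("body", "hello shard", Field.Store.NO, Field.Index.ANALYZED));

    // Insert: only the new document is needed.
    DocumentAndOp insert = new DocumentAndOp(DocumentAndOp.Op.INSERT, doc);

    // Delete: only the delete term is needed.
    DocumentAndOp delete =
        new DocumentAndOp(DocumentAndOp.Op.DELETE, new Term("id", "doc-42"));

    // Update: both the replacement document and the delete term are needed.
    DocumentAndOp update =
        new DocumentAndOp(DocumentAndOp.Op.UPDATE, doc, new Term("id", "doc-42"));

    System.out.println(insert + "\n" + delete + "\n" + update);
  }
}
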
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentID.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentID.java
deleted file mode 100755
index d5d0d9e0dd2..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentID.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
-
-/**
- * This class represents a document id, which is of type Text.
- */
-public class DocumentID implements WritableComparable {
- private final Text docID;
-
- /**
- * Constructor.
- */
- public DocumentID() {
- docID = new Text();
- }
-
- /**
- * The text of the document id.
- * @return the text
- */
- public Text getText() {
- return docID;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Comparable#compareTo(java.lang.Object)
- */
- public int compareTo(Object obj) {
- if (this == obj) {
- return 0;
- } else {
- return docID.compareTo(((DocumentID) obj).docID);
- }
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#hashCode()
- */
- public int hashCode() {
- return docID.hashCode();
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- public String toString() {
- return this.getClass().getName() + "[" + docID + "]";
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
- */
- public void write(DataOutput out) throws IOException {
- throw new IOException(this.getClass().getName()
- + ".write should never be called");
- }
-
- /* (non-Javadoc)
- * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
- */
- public void readFields(DataInput in) throws IOException {
- throw new IOException(this.getClass().getName()
- + ".readFields should never be called");
- }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IDistributionPolicy.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IDistributionPolicy.java
deleted file mode 100755
index f454ad61397..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IDistributionPolicy.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.mapred;
-
-/**
- * A distribution policy decides, given a document with a document id, which
- * single shard an insert request should be sent to, and which shard(s) a
- * delete request should be sent to.
- */
-public interface IDistributionPolicy {
-
- /**
- * Initialization. It must be called before any chooseShard() is called.
- * @param shards
- */
- void init(Shard[] shards);
-
- /**
- * Choose a shard to send an insert request.
- * @param key
- * @return the index of the chosen shard
- */
- int chooseShardForInsert(DocumentID key);
-
- /**
- * Choose a shard or all shards to send a delete request. E.g. a round-robin
- * distribution policy would send a delete request to all the shards.
- * -1 represents all the shards.
- * @param key
- * @return the index of the chosen shard, -1 if all the shards are chosen
- */
- int chooseShardForDelete(DocumentID key);
-
-}
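
A minimal hypothetical policy that satisfies this contract by hashing inserts to a single shard and broadcasting deletes to all shards; the project ships its own policies, this is only a sketch:

import org.apache.hadoop.contrib.index.mapred.DocumentID;
import org.apache.hadoop.contrib.index.mapred.IDistributionPolicy;
import org.apache.hadoop.contrib.index.mapred.Shard;

public class HashInsertBroadcastDeletePolicy implements IDistributionPolicy {
  private int numShards;

  public void init(Shard[] shards) {
    numShards = shards.length;
  }

  public int chooseShardForInsert(DocumentID key) {
    // Hash the document id text to exactly one shard.
    int hash = key.getText().hashCode();
    return (hash & Integer.MAX_VALUE) % numShards;
  }

  public int chooseShardForDelete(DocumentID key) {
    // -1 means the delete is sent to all shards.
    return -1;
  }
}
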
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IIndexUpdater.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IIndexUpdater.java
deleted file mode 100755
index 9feb9a2db9a..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IIndexUpdater.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.mapred;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-
-/**
- * A class implementing the index updater interface should create a Map/Reduce job
- * configuration and run the Map/Reduce job to analyze documents and update
- * Lucene instances in parallel.
- */
-public interface IIndexUpdater {
-
- /**
- * Create a Map/Reduce job configuration and run the Map/Reduce job to
- * analyze documents and update Lucene instances in parallel.
- * @param conf
- * @param inputPaths
- * @param outputPath
- * @param numMapTasks
- * @param shards
- * @throws IOException
- */
- void run(Configuration conf, Path[] inputPaths, Path outputPath,
- int numMapTasks, Shard[] shards) throws IOException;
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/ILocalAnalysis.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/ILocalAnalysis.java
deleted file mode 100755
index 32d59c57664..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/ILocalAnalysis.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.mapred;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.mapred.Mapper;
-
-/**
- * Application specific local analysis. The output type must be (DocumentID,
- * DocumentAndOp).
- */
-public interface ILocalAnalysis<K extends WritableComparable, V extends Writable>
-    extends Mapper<K, V, DocumentID, DocumentAndOp> {
-
-}
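
A hypothetical implementation that treats each input line as a document to insert, assuming TextInputFormat-style keys and values and a Lucene 2.4-era Field API; the class and field names are illustrative:

import java.io.IOException;

import org.apache.hadoop.contrib.index.mapred.DocumentAndOp;
import org.apache.hadoop.contrib.index.mapred.DocumentID;
import org.apache.hadoop.contrib.index.mapred.ILocalAnalysis;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

public class LineInsertAnalysis implements ILocalAnalysis<LongWritable, Text> {

  public void configure(JobConf job) {
    // No configuration needed for this sketch.
  }

  public void map(LongWritable offset, Text line,
      OutputCollector<DocumentID, DocumentAndOp> output, Reporter reporter)
      throws IOException {
    // Derive a document id from the byte offset of the line.
    DocumentID id = new DocumentID();
    id.getText().set("line-" + offset.get());

    // Turn the line into a Lucene document and emit it as an insert.
    Document doc = new Document();
    doc.add(new Field("content", line.toString(),
        Field.Store.NO, Field.Index.ANALYZED));
    output.collect(id, new DocumentAndOp(DocumentAndOp.Op.INSERT, doc));
  }

  public void close() throws IOException {
    // Nothing to release.
  }
}
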
diff --git a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateCombiner.java b/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateCombiner.java
deleted file mode 100755
index bb84ba8253f..00000000000
--- a/hadoop-mapreduce-project/src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateCombiner.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.mapred;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MapReduceBase;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.Reducer;
-import org.apache.hadoop.mapred.Reporter;
-
-/**
- * This combiner combines multiple intermediate forms into one intermediate
- * form. More specifically, the input intermediate forms are a single-document
- * ram index and/or a single delete term. An output intermediate form contains
- * a multi-document ram index and/or multiple delete terms.
- */
-public class IndexUpdateCombiner extends MapReduceBase implements
-    Reducer<Shard, IntermediateForm, Shard, IntermediateForm> {